Diffstat (limited to 'arch/arm/plat-omap')
-rw-r--r--	arch/arm/plat-omap/Kconfig	| 16
-rw-r--r--	arch/arm/plat-omap/Makefile	| 2
-rw-r--r--	arch/arm/plat-omap/clock.c	| 51
-rw-r--r--	arch/arm/plat-omap/counter_32k.c	| 123
-rw-r--r--	arch/arm/plat-omap/dmtimer.c	| 213
-rw-r--r--	arch/arm/plat-omap/include/plat/clkdev.h	| 13
-rw-r--r--	arch/arm/plat-omap/include/plat/clkdev_omap.h	| 1
-rw-r--r--	arch/arm/plat-omap/include/plat/clock.h	| 4
-rw-r--r--	arch/arm/plat-omap/include/plat/common.h	| 6
-rw-r--r--	arch/arm/plat-omap/include/plat/cpu.h	| 35
-rw-r--r--	arch/arm/plat-omap/include/plat/dma.h	| 5
-rw-r--r--	arch/arm/plat-omap/include/plat/dmtimer.h	| 251
-rw-r--r--	arch/arm/plat-omap/include/plat/gpio.h	| 20
-rw-r--r--	arch/arm/plat-omap/include/plat/i2c.h	| 6
-rw-r--r--	arch/arm/plat-omap/include/plat/iommu.h	| 77
-rw-r--r--	arch/arm/plat-omap/include/plat/iommu2.h	| 4
-rw-r--r--	arch/arm/plat-omap/include/plat/iopgtable.h (renamed from arch/arm/plat-omap/iopgtable.h)	| 2
-rw-r--r--	arch/arm/plat-omap/include/plat/iovmm.h	| 30
-rw-r--r--	arch/arm/plat-omap/include/plat/irqs.h	| 19
-rw-r--r--	arch/arm/plat-omap/include/plat/mcbsp.h	| 74
-rw-r--r--	arch/arm/plat-omap/include/plat/nand.h	| 6
-rw-r--r--	arch/arm/plat-omap/include/plat/omap-pm.h	| 8
-rw-r--r--	arch/arm/plat-omap/include/plat/omap4-keypad.h	| 3
-rw-r--r--	arch/arm/plat-omap/include/plat/omap_device.h	| 9
-rw-r--r--	arch/arm/plat-omap/include/plat/omap_hwmod.h	| 35
-rw-r--r--	arch/arm/plat-omap/include/plat/serial.h	| 3
-rw-r--r--	arch/arm/plat-omap/include/plat/uncompress.h	| 1
-rw-r--r--	arch/arm/plat-omap/iommu-debug.c	| 418
-rw-r--r--	arch/arm/plat-omap/iommu.c	| 1102
-rw-r--r--	arch/arm/plat-omap/iovmm.c	| 907
-rw-r--r--	arch/arm/plat-omap/mcbsp.c	| 604
-rw-r--r--	arch/arm/plat-omap/sram.c	| 15
33 files changed, 673 insertions(+), 3541 deletions(-)
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 49a4c75243f..fa62037f1df 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -13,6 +13,7 @@ config ARCH_OMAP1
13 bool "TI OMAP1" 13 bool "TI OMAP1"
14 select CLKDEV_LOOKUP 14 select CLKDEV_LOOKUP
15 select CLKSRC_MMIO 15 select CLKSRC_MMIO
16 select GENERIC_IRQ_CHIP
16 help 17 help
17 "Systems based on omap7xx, omap15xx or omap16xx" 18 "Systems based on omap7xx, omap15xx or omap16xx"
18 19
@@ -131,18 +132,6 @@ config OMAP_MBOX_KFIFO_SIZE
 	  This can also be changed at runtime (via the mbox_kfifo_size
 	  module parameter).
 
-config OMAP_IOMMU
-	tristate
-
-config OMAP_IOMMU_DEBUG
-	tristate "Export OMAP IOMMU internals in DebugFS"
-	depends on OMAP_IOMMU && DEBUG_FS
-	help
-	  Select this to see extensive information about
-	  the internal state of OMAP IOMMU in debugfs.
-
-	  Say N unless you know you need this.
-
 config OMAP_IOMMU_IVA2
 	bool
 
@@ -211,9 +200,6 @@ choice
 	depends on ARCH_OMAP
 	default OMAP_PM_NOOP
 
-config OMAP_PM_NONE
-	bool "No PM layer"
-
 config OMAP_PM_NOOP
 	bool "No-op/debug PM layer"
 
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index f0233e6abcd..985262242f2 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -18,8 +18,6 @@ obj-$(CONFIG_ARCH_OMAP3) += omap_device.o
 obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
-obj-$(CONFIG_OMAP_IOMMU) += iommu.o iovmm.o
-obj-$(CONFIG_OMAP_IOMMU_DEBUG) += iommu-debug.o
 
 obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
 obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index c9122dd6ee8..3ba4d11ca73 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -475,18 +475,48 @@ int __init clk_init(struct clk_functions * custom_clocks)
 /*
  * debugfs support to trace clock tree hierarchy and attributes
  */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
 static struct dentry *clk_debugfs_root;
 
+static int clk_dbg_show_summary(struct seq_file *s, void *unused)
+{
+	struct clk *c;
+	struct clk *pa;
+
+	seq_printf(s, "%-30s %-30s %-10s %s\n",
+		"clock-name", "parent-name", "rate", "use-count");
+
+	list_for_each_entry(c, &clocks, node) {
+		pa = c->parent;
+		seq_printf(s, "%-30s %-30s %-10lu %d\n",
+			c->name, pa ? pa->name : "none", c->rate, c->usecount);
+	}
+
+	return 0;
+}
+
+static int clk_dbg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, clk_dbg_show_summary, inode->i_private);
+}
+
+static const struct file_operations debug_clock_fops = {
+	.open		= clk_dbg_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static int clk_debugfs_register_one(struct clk *c)
 {
 	int err;
-	struct dentry *d, *child, *child_tmp;
+	struct dentry *d;
 	struct clk *pa = c->parent;
-	char s[255];
-	char *p = s;
 
-	p += sprintf(p, "%s", c->name);
-	d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
+	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
 	if (!d)
 		return -ENOMEM;
 	c->dent = d;
@@ -509,10 +539,7 @@ static int clk_debugfs_register_one(struct clk *c)
 	return 0;
 
 err_out:
-	d = c->dent;
-	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
-		debugfs_remove(child);
-	debugfs_remove(c->dent);
+	debugfs_remove_recursive(c->dent);
 	return err;
 }
 
@@ -551,6 +578,12 @@ static int __init clk_debugfs_init(void)
 		if (err)
 			goto err_out;
 	}
+
+	d = debugfs_create_file("summary", S_IRUGO,
+		d, NULL, &debug_clock_fops);
+	if (!d)
+		return -ENOMEM;
+
 	return 0;
 err_out:
 	debugfs_remove_recursive(clk_debugfs_root);
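
[The "summary" file added above is a stock use of the debugfs single-show idiom: one show callback rendered through seq_file. For reference, a minimal self-contained sketch of the same idiom; all my_dbg_* names are illustrative and not part of this patch:]

    #include <linux/debugfs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    static int my_dbg_show(struct seq_file *s, void *unused)
    {
            /* one seq_printf() per output line; seq_file handles buffering */
            seq_printf(s, "%-20s %s\n", "field", "value");
            return 0;
    }

    static int my_dbg_open(struct inode *inode, struct file *file)
    {
            /* inode->i_private is the data pointer passed at create time */
            return single_open(file, my_dbg_show, inode->i_private);
    }

    static const struct file_operations my_dbg_fops = {
            .open           = my_dbg_open,
            .read           = seq_read,
            .llseek         = seq_lseek,
            .release        = single_release,
    };

    static int __init my_dbg_init(void)
    {
            /* NULL parent places the file at the debugfs root */
            if (!debugfs_create_file("summary", S_IRUGO, NULL, NULL,
                                     &my_dbg_fops))
                    return -ENOMEM;
            return 0;
    }
    module_init(my_dbg_init);
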
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index f7fed608019..a6cbb712da5 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -18,6 +18,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/clocksource.h>
 
 #include <asm/sched_clock.h>
 
@@ -26,87 +27,16 @@
 
 #include <plat/clock.h>
 
-
 /*
  * 32KHz clocksource ... always available, on most chips except
  * OMAP 730 and 1510. Other timers could be used as clocksources, with
  * higher resolution in free-running counter modes (e.g. 12 MHz xtal),
  * but systems won't necessarily want to spend resources that way.
  */
+static void __iomem *timer_32k_base;
 
 #define OMAP16XX_TIMER_32K_SYNCHRONIZED		0xfffbc410
 
-#include <linux/clocksource.h>
-
-/*
- * offset_32k holds the init time counter value. It is then subtracted
- * from every counter read to achieve a counter that counts time from the
- * kernel boot (needed for sched_clock()).
- */
-static u32 offset_32k __read_mostly;
-
-#ifdef CONFIG_ARCH_OMAP16XX
-static cycle_t notrace omap16xx_32k_read(struct clocksource *cs)
-{
-	return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED) - offset_32k;
-}
-#else
-#define omap16xx_32k_read	NULL
-#endif
-
-#ifdef CONFIG_SOC_OMAP2420
-static cycle_t notrace omap2420_32k_read(struct clocksource *cs)
-{
-	return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap2420_32k_read	NULL
-#endif
-
-#ifdef CONFIG_SOC_OMAP2430
-static cycle_t notrace omap2430_32k_read(struct clocksource *cs)
-{
-	return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap2430_32k_read	NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP3
-static cycle_t notrace omap34xx_32k_read(struct clocksource *cs)
-{
-	return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap34xx_32k_read	NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP4
-static cycle_t notrace omap44xx_32k_read(struct clocksource *cs)
-{
-	return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap44xx_32k_read	NULL
-#endif
-
-/*
- * Kernel assumes that sched_clock can be called early but may not have
- * things ready yet.
- */
-static cycle_t notrace omap_32k_read_dummy(struct clocksource *cs)
-{
-	return 0;
-}
-
-static struct clocksource clocksource_32k = {
-	.name		= "32k_counter",
-	.rating		= 250,
-	.read		= omap_32k_read_dummy,
-	.mask		= CLOCKSOURCE_MASK(32),
-	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
 /*
  * Returns current time from boot in nsecs. It's OK for this to wrap
  * around for now, as it's just a relative time stamp.
@@ -122,11 +52,11 @@ static DEFINE_CLOCK_DATA(cd);
 
 static inline unsigned long long notrace _omap_32k_sched_clock(void)
 {
-	u32 cyc = clocksource_32k.read(&clocksource_32k);
+	u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
 	return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
 }
 
-#ifndef CONFIG_OMAP_MPU_TIMER
+#if defined(CONFIG_OMAP_32K_TIMER) && !defined(CONFIG_OMAP_MPU_TIMER)
 unsigned long long notrace sched_clock(void)
 {
 	return _omap_32k_sched_clock();
@@ -140,7 +70,7 @@ unsigned long long notrace omap_32k_sched_clock(void)
 
 static void notrace omap_update_sched_clock(void)
 {
-	u32 cyc = clocksource_32k.read(&clocksource_32k);
+	u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
 	update_sched_clock(&cd, cyc, (u32)~0);
 }
 
@@ -153,6 +83,7 @@ static void notrace omap_update_sched_clock(void)
  */
 static struct timespec persistent_ts;
 static cycles_t cycles, last_cycles;
+static unsigned int persistent_mult, persistent_shift;
 void read_persistent_clock(struct timespec *ts)
 {
 	unsigned long long nsecs;
@@ -160,11 +91,10 @@ void read_persistent_clock(struct timespec *ts)
 	struct timespec *tsp = &persistent_ts;
 
 	last_cycles = cycles;
-	cycles = clocksource_32k.read(&clocksource_32k);
+	cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
 	delta = cycles - last_cycles;
 
-	nsecs = clocksource_cyc2ns(delta,
-				   clocksource_32k.mult, clocksource_32k.shift);
+	nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
 
 	timespec_add_ns(tsp, nsecs);
 	*ts = *tsp;
@@ -176,29 +106,46 @@ int __init omap_init_clocksource_32k(void)
176 "%s: can't register clocksource!\n"; 106 "%s: can't register clocksource!\n";
177 107
178 if (cpu_is_omap16xx() || cpu_class_is_omap2()) { 108 if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
109 u32 pbase;
110 unsigned long size = SZ_4K;
111 void __iomem *base;
179 struct clk *sync_32k_ick; 112 struct clk *sync_32k_ick;
180 113
181 if (cpu_is_omap16xx()) 114 if (cpu_is_omap16xx()) {
182 clocksource_32k.read = omap16xx_32k_read; 115 pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
183 else if (cpu_is_omap2420()) 116 size = SZ_1K;
184 clocksource_32k.read = omap2420_32k_read; 117 } else if (cpu_is_omap2420())
118 pbase = OMAP2420_32KSYNCT_BASE + 0x10;
185 else if (cpu_is_omap2430()) 119 else if (cpu_is_omap2430())
186 clocksource_32k.read = omap2430_32k_read; 120 pbase = OMAP2430_32KSYNCT_BASE + 0x10;
187 else if (cpu_is_omap34xx()) 121 else if (cpu_is_omap34xx())
188 clocksource_32k.read = omap34xx_32k_read; 122 pbase = OMAP3430_32KSYNCT_BASE + 0x10;
189 else if (cpu_is_omap44xx()) 123 else if (cpu_is_omap44xx())
190 clocksource_32k.read = omap44xx_32k_read; 124 pbase = OMAP4430_32KSYNCT_BASE + 0x10;
191 else 125 else
192 return -ENODEV; 126 return -ENODEV;
193 127
128 /* For this to work we must have a static mapping in io.c for this area */
129 base = ioremap(pbase, size);
130 if (!base)
131 return -ENODEV;
132
194 sync_32k_ick = clk_get(NULL, "omap_32ksync_ick"); 133 sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
195 if (!IS_ERR(sync_32k_ick)) 134 if (!IS_ERR(sync_32k_ick))
196 clk_enable(sync_32k_ick); 135 clk_enable(sync_32k_ick);
197 136
198 offset_32k = clocksource_32k.read(&clocksource_32k); 137 timer_32k_base = base;
138
139 /*
140 * 120000 rough estimate from the calculations in
141 * __clocksource_updatefreq_scale.
142 */
143 clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
144 32768, NSEC_PER_SEC, 120000);
199 145
200 if (clocksource_register_hz(&clocksource_32k, 32768)) 146 if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
201 printk(err, clocksource_32k.name); 147 clocksource_mmio_readl_up))
148 printk(err, "32k_counter");
202 149
203 init_fixed_sched_clock(&cd, omap_update_sched_clock, 32, 150 init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
204 32768, SC_MULT, SC_SHIFT); 151 32768, SC_MULT, SC_SHIFT);
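
[The conversion above drops the per-SoC read callbacks in favour of the generic MMIO clocksource helper. A minimal sketch of registering a free-running up-counter through that helper, under the same assumptions the patch makes (continuous 32-bit counter at 32768 Hz); the physical address below is a placeholder, not a real OMAP base:]

    #include <linux/clocksource.h>
    #include <linux/io.h>

    static int __init my_32k_clocksource_init(void)
    {
            void __iomem *base;

            base = ioremap(0x48320010, 0x1000);     /* placeholder address */
            if (!base)
                    return -ENODEV;

            /*
             * 32768 Hz, rating 250, 32 valid bits, counts up;
             * clocksource_mmio_readl_up() simply reads the mapped register.
             */
            return clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
                                         clocksource_mmio_readl_up);
    }
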
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index ee9f6ebba29..75a847dd776 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -41,127 +41,6 @@
 #include <plat/dmtimer.h>
 #include <mach/irqs.h>
 
-/* register offsets */
-#define _OMAP_TIMER_ID_OFFSET		0x00
-#define _OMAP_TIMER_OCP_CFG_OFFSET	0x10
-#define _OMAP_TIMER_SYS_STAT_OFFSET	0x14
-#define _OMAP_TIMER_STAT_OFFSET	0x18
-#define _OMAP_TIMER_INT_EN_OFFSET	0x1c
-#define _OMAP_TIMER_WAKEUP_EN_OFFSET	0x20
-#define _OMAP_TIMER_CTRL_OFFSET	0x24
-#define		OMAP_TIMER_CTRL_GPOCFG		(1 << 14)
-#define		OMAP_TIMER_CTRL_CAPTMODE	(1 << 13)
-#define		OMAP_TIMER_CTRL_PT		(1 << 12)
-#define		OMAP_TIMER_CTRL_TCM_LOWTOHIGH	(0x1 << 8)
-#define		OMAP_TIMER_CTRL_TCM_HIGHTOLOW	(0x2 << 8)
-#define		OMAP_TIMER_CTRL_TCM_BOTHEDGES	(0x3 << 8)
-#define		OMAP_TIMER_CTRL_SCPWM		(1 << 7)
-#define		OMAP_TIMER_CTRL_CE		(1 << 6) /* compare enable */
-#define		OMAP_TIMER_CTRL_PRE		(1 << 5) /* prescaler enable */
-#define		OMAP_TIMER_CTRL_PTV_SHIFT	2 /* prescaler value shift */
-#define		OMAP_TIMER_CTRL_POSTED		(1 << 2)
-#define		OMAP_TIMER_CTRL_AR		(1 << 1) /* auto-reload enable */
-#define		OMAP_TIMER_CTRL_ST		(1 << 0) /* start timer */
-#define _OMAP_TIMER_COUNTER_OFFSET	0x28
-#define _OMAP_TIMER_LOAD_OFFSET	0x2c
-#define _OMAP_TIMER_TRIGGER_OFFSET	0x30
-#define _OMAP_TIMER_WRITE_PEND_OFFSET	0x34
-#define		WP_NONE			0	/* no write pending bit */
-#define		WP_TCLR			(1 << 0)
-#define		WP_TCRR			(1 << 1)
-#define		WP_TLDR			(1 << 2)
-#define		WP_TTGR			(1 << 3)
-#define		WP_TMAR			(1 << 4)
-#define		WP_TPIR			(1 << 5)
-#define		WP_TNIR			(1 << 6)
-#define		WP_TCVR			(1 << 7)
-#define		WP_TOCR			(1 << 8)
-#define		WP_TOWR			(1 << 9)
-#define _OMAP_TIMER_MATCH_OFFSET	0x38
-#define _OMAP_TIMER_CAPTURE_OFFSET	0x3c
-#define _OMAP_TIMER_IF_CTRL_OFFSET	0x40
-#define _OMAP_TIMER_CAPTURE2_OFFSET		0x44	/* TCAR2, 34xx only */
-#define _OMAP_TIMER_TICK_POS_OFFSET		0x48	/* TPIR, 34xx only */
-#define _OMAP_TIMER_TICK_NEG_OFFSET		0x4c	/* TNIR, 34xx only */
-#define _OMAP_TIMER_TICK_COUNT_OFFSET		0x50	/* TCVR, 34xx only */
-#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET	0x54	/* TOCR, 34xx only */
-#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET	0x58	/* TOWR, 34xx only */
-
-/* register offsets with the write pending bit encoded */
-#define	WPSHIFT					16
-
-#define OMAP_TIMER_ID_REG			(_OMAP_TIMER_ID_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_OCP_CFG_REG			(_OMAP_TIMER_OCP_CFG_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_SYS_STAT_REG			(_OMAP_TIMER_SYS_STAT_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_STAT_REG			(_OMAP_TIMER_STAT_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_INT_EN_REG			(_OMAP_TIMER_INT_EN_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_WAKEUP_EN_REG		(_OMAP_TIMER_WAKEUP_EN_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CTRL_REG			(_OMAP_TIMER_CTRL_OFFSET \
-							| (WP_TCLR << WPSHIFT))
-
-#define OMAP_TIMER_COUNTER_REG			(_OMAP_TIMER_COUNTER_OFFSET \
-							| (WP_TCRR << WPSHIFT))
-
-#define OMAP_TIMER_LOAD_REG			(_OMAP_TIMER_LOAD_OFFSET \
-							| (WP_TLDR << WPSHIFT))
-
-#define OMAP_TIMER_TRIGGER_REG			(_OMAP_TIMER_TRIGGER_OFFSET \
-							| (WP_TTGR << WPSHIFT))
-
-#define OMAP_TIMER_WRITE_PEND_REG		(_OMAP_TIMER_WRITE_PEND_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_MATCH_REG			(_OMAP_TIMER_MATCH_OFFSET \
-							| (WP_TMAR << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE_REG			(_OMAP_TIMER_CAPTURE_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_IF_CTRL_REG			(_OMAP_TIMER_IF_CTRL_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE2_REG			(_OMAP_TIMER_CAPTURE2_OFFSET \
-							| (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_TICK_POS_REG			(_OMAP_TIMER_TICK_POS_OFFSET \
-							| (WP_TPIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_NEG_REG			(_OMAP_TIMER_TICK_NEG_OFFSET \
-							| (WP_TNIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_COUNT_REG		(_OMAP_TIMER_TICK_COUNT_OFFSET \
-							| (WP_TCVR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
-		(_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
-		(_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
-
-struct omap_dm_timer {
-	unsigned long phys_base;
-	int irq;
-#ifdef CONFIG_ARCH_OMAP2PLUS
-	struct clk *iclk, *fclk;
-#endif
-	void __iomem *io_base;
-	unsigned reserved:1;
-	unsigned enabled:1;
-	unsigned posted:1;
-};
-
 static int dm_timer_count;
 
 #ifdef CONFIG_ARCH_OMAP1
@@ -291,11 +170,7 @@ static spinlock_t dm_timer_lock;
  */
 static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
 {
-	if (timer->posted)
-		while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
-				& (reg >> WPSHIFT))
-			cpu_relax();
-	return readl(timer->io_base + (reg & 0xff));
+	return __omap_dm_timer_read(timer->io_base, reg, timer->posted);
 }
 
 /*
@@ -307,11 +182,7 @@ static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
 static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
 						u32 value)
 {
-	if (timer->posted)
-		while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
-				& (reg >> WPSHIFT))
-			cpu_relax();
-	writel(value, timer->io_base + (reg & 0xff));
+	__omap_dm_timer_write(timer->io_base, reg, value, timer->posted);
 }
 
 static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
@@ -330,7 +201,7 @@ static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
 
 static void omap_dm_timer_reset(struct omap_dm_timer *timer)
 {
-	u32 l;
+	int autoidle = 0, wakeup = 0;
 
 	if (!cpu_class_is_omap2() || timer != &dm_timers[0]) {
 		omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
@@ -338,28 +209,21 @@ static void omap_dm_timer_reset(struct omap_dm_timer *timer)
 	}
 	omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
 
-	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG);
-	l |= 0x02 << 3;  /* Set to smart-idle mode */
-	l |= 0x2 << 8;   /* Set clock activity to preserve f-clock on idle */
-
-	/* Enable autoidle on OMAP2 / OMAP3 */
-	if (cpu_is_omap24xx() || cpu_is_omap34xx())
-		l |= 0x1 << 0;
+	/* Enable autoidle on OMAP2+ */
+	if (cpu_class_is_omap2())
+		autoidle = 1;
 
 	/*
 	 * Enable wake-up on OMAP2 CPUs.
 	 */
 	if (cpu_class_is_omap2())
-		l |= 1 << 2;
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, l);
+		wakeup = 1;
 
-	/* Match hardware reset default of posted mode */
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
-			OMAP_TIMER_CTRL_POSTED);
+	__omap_dm_timer_reset(timer->io_base, autoidle, wakeup);
 	timer->posted = 1;
 }
 
-static void omap_dm_timer_prepare(struct omap_dm_timer *timer)
+void omap_dm_timer_prepare(struct omap_dm_timer *timer)
 {
 	omap_dm_timer_enable(timer);
 	omap_dm_timer_reset(timer);
@@ -531,25 +395,13 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_start);
 
 void omap_dm_timer_stop(struct omap_dm_timer *timer)
 {
-	u32 l;
+	unsigned long rate = 0;
 
-	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-	if (l & OMAP_TIMER_CTRL_ST) {
-		l &= ~0x1;
-		omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
 #ifdef CONFIG_ARCH_OMAP2PLUS
-		/* Readback to make sure write has completed */
-		omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-		/*
-		 * Wait for functional clock period x 3.5 to make sure that
-		 * timer is stopped
-		 */
-		udelay(3500000 / clk_get_rate(timer->fclk) + 1);
+	rate = clk_get_rate(timer->fclk);
 #endif
-	}
-	/* Ack possibly pending interrupt */
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
-			OMAP_TIMER_INT_OVERFLOW);
+
+	__omap_dm_timer_stop(timer->io_base, timer->posted, rate);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
 
@@ -572,22 +424,11 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
 
 int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
 {
-	int ret = -EINVAL;
-
 	if (source < 0 || source >= 3)
 		return -EINVAL;
 
-	clk_disable(timer->fclk);
-	ret = clk_set_parent(timer->fclk, dm_source_clocks[source]);
-	clk_enable(timer->fclk);
-
-	/*
-	 * When the functional clock disappears, too quick writes seem
-	 * to cause an abort. XXX Is this still necessary?
-	 */
-	__delay(300000);
-
-	return ret;
+	return __omap_dm_timer_set_source(timer->fclk,
+						dm_source_clocks[source]);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
 
@@ -625,8 +466,7 @@ void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
 	}
 	l |= OMAP_TIMER_CTRL_ST;
 
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, load);
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+	__omap_dm_timer_load_start(timer->io_base, l, load, timer->posted);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start);
 
@@ -679,8 +519,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler);
 void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
 				  unsigned int value)
 {
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
+	__omap_dm_timer_int_enable(timer->io_base, value);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable);
 
@@ -696,17 +535,13 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_read_status);
 
 void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
 {
-	omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
+	__omap_dm_timer_write_status(timer->io_base, value);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);
 
 unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
 {
-	unsigned int l;
-
-	l = omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
-
-	return l;
+	return __omap_dm_timer_read_counter(timer->io_base, timer->posted);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter);
 
@@ -737,7 +572,7 @@ int omap_dm_timers_active(void)
 }
 EXPORT_SYMBOL_GPL(omap_dm_timers_active);
 
-int __init omap_dm_timer_init(void)
+static int __init omap_dm_timer_init(void)
 {
 	struct omap_dm_timer *timer;
 	int i, map_size = SZ_8K;	/* Module 4KB + L4 4KB except on omap1 */
@@ -790,8 +625,16 @@ int __init omap_dm_timer_init(void)
 		sprintf(clk_name, "gpt%d_fck", i + 1);
 		timer->fclk = clk_get(NULL, clk_name);
 	}
+
+		/* One or two timers may be set up early for sys_timer */
+		if (sys_timer_reserved & (1 << i)) {
+			timer->reserved = 1;
+			timer->posted = 1;
+		}
 #endif
 	}
 
 	return 0;
 }
+
+arch_initcall(omap_dm_timer_init);
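
[For context, the request/configure/start API that stays in this file is typically consumed as in the following illustrative sketch; error handling is trimmed and the ~1 s period at 32768 Hz is arbitrary:]

    #include <plat/dmtimer.h>

    static struct omap_dm_timer *gpt;

    static int __init my_gpt_setup(void)
    {
            gpt = omap_dm_timer_request();          /* grab any free GP timer */
            if (!gpt)
                    return -ENODEV;

            omap_dm_timer_set_source(gpt, OMAP_TIMER_SRC_32_KHZ);
            /* autoreload = 1; overflow after 32768 ticks (~1 s at 32 kHz) */
            omap_dm_timer_set_load_start(gpt, 1, 0xffffffff - 32768);
            return 0;
    }

    static void my_gpt_teardown(void)
    {
            omap_dm_timer_stop(gpt);
            omap_dm_timer_free(gpt);
    }
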
diff --git a/arch/arm/plat-omap/include/plat/clkdev.h b/arch/arm/plat-omap/include/plat/clkdev.h
deleted file mode 100644
index 730c49d1ebd..00000000000
--- a/arch/arm/plat-omap/include/plat/clkdev.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __MACH_CLKDEV_H
-#define __MACH_CLKDEV_H
-
-static inline int __clk_get(struct clk *clk)
-{
-	return 1;
-}
-
-static inline void __clk_put(struct clk *clk)
-{
-}
-
-#endif
diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
index f1899a3e417..387a9638991 100644
--- a/arch/arm/plat-omap/include/plat/clkdev_omap.h
+++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
@@ -39,6 +39,7 @@ struct omap_clk {
 #define CK_36XX		(1 << 10)	/* 36xx/37xx-specific clocks */
 #define CK_443X		(1 << 11)
 #define CK_TI816X	(1 << 12)
+#define CK_446X		(1 << 13)
 
 
 #define CK_34XX		(CK_3430ES1 | CK_3430ES2PLUS)
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 006e599c661..df4b9683f17 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -58,10 +58,12 @@ struct clkops {
 #define RATE_IN_36XX		(1 << 4)
 #define RATE_IN_4430		(1 << 5)
 #define RATE_IN_TI816X		(1 << 6)
+#define RATE_IN_4460		(1 << 7)
 
 #define RATE_IN_24XX		(RATE_IN_242X | RATE_IN_243X)
 #define RATE_IN_34XX		(RATE_IN_3430ES1 | RATE_IN_3430ES2PLUS)
 #define RATE_IN_3XXX		(RATE_IN_34XX | RATE_IN_36XX)
+#define RATE_IN_44XX		(RATE_IN_4430 | RATE_IN_4460)
 
 /* RATE_IN_3430ES2PLUS_36XX includes 34xx/35xx with ES >=2, and all 36xx/37xx */
 #define RATE_IN_3430ES2PLUS_36XX	(RATE_IN_3430ES2PLUS | RATE_IN_36XX)
@@ -152,7 +154,7 @@ struct dpll_data {
 	u16		max_multiplier;
 	u8		last_rounded_n;
 	u8		min_divider;
-	u8		max_divider;
+	u16		max_divider;
 	u8		modes;
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
 	void __iomem	*autoidle_reg;
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 5288130be96..4564cc697d7 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -34,7 +34,11 @@
 struct sys_timer;
 
 extern void omap_map_common_io(void);
-extern struct sys_timer omap_timer;
+extern struct sys_timer omap1_timer;
+extern struct sys_timer omap2_timer;
+extern struct sys_timer omap3_timer;
+extern struct sys_timer omap3_secure_timer;
+extern struct sys_timer omap4_timer;
 extern bool omap_32k_timer_init(void);
 extern int __init omap_init_clocksource_32k(void);
 extern unsigned long long notrace omap_32k_sched_clock(void);
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 8198bb6cdb5..67b3d75884c 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -88,6 +88,7 @@ unsigned int omap_rev(void);
  * cpu_is_omap243x():	True for OMAP2430
  * cpu_is_omap343x():	True for OMAP3430
  * cpu_is_omap443x():	True for OMAP4430
+ * cpu_is_omap446x():	True for OMAP4460
  */
 #define GET_OMAP_CLASS	(omap_rev() & 0xff)
 
@@ -123,6 +124,7 @@ IS_OMAP_SUBCLASS(243x, 0x243)
 IS_OMAP_SUBCLASS(343x, 0x343)
 IS_OMAP_SUBCLASS(363x, 0x363)
 IS_OMAP_SUBCLASS(443x, 0x443)
+IS_OMAP_SUBCLASS(446x, 0x446)
 
 IS_TI_SUBCLASS(816x, 0x816)
 
@@ -137,6 +139,7 @@ IS_TI_SUBCLASS(816x, 0x816)
 #define cpu_is_ti816x()			0
 #define cpu_is_omap44xx()		0
 #define cpu_is_omap443x()		0
+#define cpu_is_omap446x()		0
 
 #if defined(MULTI_OMAP1)
 # if defined(CONFIG_ARCH_OMAP730)
@@ -361,8 +364,10 @@ IS_OMAP_TYPE(3517, 0x3517)
 # if defined(CONFIG_ARCH_OMAP4)
 #  undef cpu_is_omap44xx
 #  undef cpu_is_omap443x
+#  undef cpu_is_omap446x
 #  define cpu_is_omap44xx()		is_omap44xx()
 #  define cpu_is_omap443x()		is_omap443x()
+#  define cpu_is_omap446x()		is_omap446x()
 # endif
 
 /* Macros to detect if we have OMAP1 or OMAP2 */
@@ -410,6 +415,9 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define OMAP4430_REV_ES2_1	(OMAP443X_CLASS | (0x21 << 8))
 #define OMAP4430_REV_ES2_2	(OMAP443X_CLASS | (0x22 << 8))
 
+#define OMAP446X_CLASS		0x44600044
+#define OMAP4460_REV_ES1_0	(OMAP446X_CLASS | (0x10 << 8))
+
 /*
  * omap_chip bits
  *
@@ -439,13 +447,15 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define CHIP_IS_OMAP4430ES2_1		(1 << 12)
 #define CHIP_IS_OMAP4430ES2_2		(1 << 13)
 #define CHIP_IS_TI816X			(1 << 14)
+#define CHIP_IS_OMAP4460ES1_0		(1 << 15)
 
 #define CHIP_IS_OMAP24XX		(CHIP_IS_OMAP2420 | CHIP_IS_OMAP2430)
 
 #define CHIP_IS_OMAP4430		(CHIP_IS_OMAP4430ES1 |		\
 					 CHIP_IS_OMAP4430ES2 |		\
 					 CHIP_IS_OMAP4430ES2_1 |	\
-					 CHIP_IS_OMAP4430ES2_2)
+					 CHIP_IS_OMAP4430ES2_2 |	\
+					 CHIP_IS_OMAP4460ES1_0)
 
 /*
  * "GE" here represents "greater than or equal to" in terms of ES
@@ -468,7 +478,7 @@ void omap2_check_revision(void);
 /*
  * Runtime detection of OMAP3 features
  */
-extern u32 omap3_features;
+extern u32 omap_features;
 
 #define OMAP3_HAS_L2CACHE		BIT(0)
 #define OMAP3_HAS_IVA			BIT(1)
@@ -478,11 +488,15 @@ extern u32 omap3_features;
 #define OMAP3_HAS_192MHZ_CLK		BIT(5)
 #define OMAP3_HAS_IO_WAKEUP		BIT(6)
 #define OMAP3_HAS_SDRC			BIT(7)
+#define OMAP4_HAS_MPU_1GHZ		BIT(8)
+#define OMAP4_HAS_MPU_1_2GHZ		BIT(9)
+#define OMAP4_HAS_MPU_1_5GHZ		BIT(10)
+
 
 #define OMAP3_HAS_FEATURE(feat,flag)			\
 static inline unsigned int omap3_has_ ##feat(void)	\
 {							\
-	return (omap3_features & OMAP3_HAS_ ##flag);	\
+	return omap_features & OMAP3_HAS_ ##flag;	\
 }							\
 
 OMAP3_HAS_FEATURE(l2cache, L2CACHE)
@@ -494,4 +508,19 @@ OMAP3_HAS_FEATURE(192mhz_clk, 192MHZ_CLK)
 OMAP3_HAS_FEATURE(io_wakeup, IO_WAKEUP)
 OMAP3_HAS_FEATURE(sdrc, SDRC)
 
+/*
+ * Runtime detection of OMAP4 features
+ */
+extern u32 omap_features;
+
+#define OMAP4_HAS_FEATURE(feat, flag)			\
+static inline unsigned int omap4_has_ ##feat(void)	\
+{							\
+	return omap_features & OMAP4_HAS_ ##flag;	\
+}							\
+
+OMAP4_HAS_FEATURE(mpu_1ghz, MPU_1GHZ)
+OMAP4_HAS_FEATURE(mpu_1_2ghz, MPU_1_2GHZ)
+OMAP4_HAS_FEATURE(mpu_1_5ghz, MPU_1_5GHZ)
+
 #endif
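
[The OMAP4_HAS_FEATURE() block above generates omap4_has_mpu_1ghz() and friends from bits in omap_features. A sketch of how such helpers might be consumed once revision detection has populated omap_features; the function name is made up for illustration:]

    #include <linux/kernel.h>
    #include <plat/cpu.h>

    static void __init my_report_mpu_opps(void)
    {
            /* omap_features is filled in by the revision-detection code */
            if (omap4_has_mpu_1_5ghz())
                    pr_info("MPU supports the 1.5 GHz OPP\n");
            else if (omap4_has_mpu_1_2ghz())
                    pr_info("MPU supports the 1.2 GHz OPP\n");
            else if (omap4_has_mpu_1ghz())
                    pr_info("MPU supports the 1 GHz OPP\n");
    }
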
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index d1c916fcf77..dc562a5c0a8 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -195,6 +195,11 @@
 
 #define OMAP36XX_DMA_UART4_TX		81	/* S_DMA_80 */
 #define OMAP36XX_DMA_UART4_RX		82	/* S_DMA_81 */
+
+/* Only for AM35xx */
+#define AM35XX_DMA_UART4_TX		54
+#define AM35XX_DMA_UART4_RX		55
+
 /*----------------------------------------------------------------------------*/
 
 #define OMAP1_DMA_TOUT_IRQ		(1 << 0)
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index d6c70d2f403..eb5d16c60cd 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -32,6 +32,10 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
 #ifndef __ASM_ARCH_DMTIMER_H
 #define __ASM_ARCH_DMTIMER_H
 
@@ -56,12 +60,8 @@
  */
 #define OMAP_TIMER_IP_VERSION_1			0x1
 struct omap_dm_timer;
-extern struct omap_dm_timer *gptimer_wakeup;
-extern struct sys_timer omap_timer;
 struct clk;
 
-int omap_dm_timer_init(void);
-
 struct omap_dm_timer *omap_dm_timer_request(void);
 struct omap_dm_timer *omap_dm_timer_request_specific(int timer_id);
 void omap_dm_timer_free(struct omap_dm_timer *timer);
@@ -93,5 +93,248 @@ void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value
 
 int omap_dm_timers_active(void);
 
+/*
+ * Do not use the defines below, they are not needed. They should be only
+ * used by dmtimer.c and sys_timer related code.
+ */
+
+/* register offsets */
+#define _OMAP_TIMER_ID_OFFSET		0x00
+#define _OMAP_TIMER_OCP_CFG_OFFSET	0x10
+#define _OMAP_TIMER_SYS_STAT_OFFSET	0x14
+#define _OMAP_TIMER_STAT_OFFSET	0x18
+#define _OMAP_TIMER_INT_EN_OFFSET	0x1c
+#define _OMAP_TIMER_WAKEUP_EN_OFFSET	0x20
+#define _OMAP_TIMER_CTRL_OFFSET	0x24
+#define		OMAP_TIMER_CTRL_GPOCFG		(1 << 14)
+#define		OMAP_TIMER_CTRL_CAPTMODE	(1 << 13)
+#define		OMAP_TIMER_CTRL_PT		(1 << 12)
+#define		OMAP_TIMER_CTRL_TCM_LOWTOHIGH	(0x1 << 8)
+#define		OMAP_TIMER_CTRL_TCM_HIGHTOLOW	(0x2 << 8)
+#define		OMAP_TIMER_CTRL_TCM_BOTHEDGES	(0x3 << 8)
+#define		OMAP_TIMER_CTRL_SCPWM		(1 << 7)
+#define		OMAP_TIMER_CTRL_CE		(1 << 6) /* compare enable */
+#define		OMAP_TIMER_CTRL_PRE		(1 << 5) /* prescaler enable */
+#define		OMAP_TIMER_CTRL_PTV_SHIFT	2 /* prescaler value shift */
+#define		OMAP_TIMER_CTRL_POSTED		(1 << 2)
+#define		OMAP_TIMER_CTRL_AR		(1 << 1) /* auto-reload enable */
+#define		OMAP_TIMER_CTRL_ST		(1 << 0) /* start timer */
+#define _OMAP_TIMER_COUNTER_OFFSET	0x28
+#define _OMAP_TIMER_LOAD_OFFSET	0x2c
+#define _OMAP_TIMER_TRIGGER_OFFSET	0x30
+#define _OMAP_TIMER_WRITE_PEND_OFFSET	0x34
+#define		WP_NONE			0	/* no write pending bit */
+#define		WP_TCLR			(1 << 0)
+#define		WP_TCRR			(1 << 1)
+#define		WP_TLDR			(1 << 2)
+#define		WP_TTGR			(1 << 3)
+#define		WP_TMAR			(1 << 4)
+#define		WP_TPIR			(1 << 5)
+#define		WP_TNIR			(1 << 6)
+#define		WP_TCVR			(1 << 7)
+#define		WP_TOCR			(1 << 8)
+#define		WP_TOWR			(1 << 9)
+#define _OMAP_TIMER_MATCH_OFFSET	0x38
+#define _OMAP_TIMER_CAPTURE_OFFSET	0x3c
+#define _OMAP_TIMER_IF_CTRL_OFFSET	0x40
+#define _OMAP_TIMER_CAPTURE2_OFFSET		0x44	/* TCAR2, 34xx only */
+#define _OMAP_TIMER_TICK_POS_OFFSET		0x48	/* TPIR, 34xx only */
+#define _OMAP_TIMER_TICK_NEG_OFFSET		0x4c	/* TNIR, 34xx only */
+#define _OMAP_TIMER_TICK_COUNT_OFFSET		0x50	/* TCVR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET	0x54	/* TOCR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET	0x58	/* TOWR, 34xx only */
+
+/* register offsets with the write pending bit encoded */
+#define	WPSHIFT					16
+
+#define OMAP_TIMER_ID_REG			(_OMAP_TIMER_ID_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_OCP_CFG_REG			(_OMAP_TIMER_OCP_CFG_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_SYS_STAT_REG			(_OMAP_TIMER_SYS_STAT_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_STAT_REG			(_OMAP_TIMER_STAT_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_INT_EN_REG			(_OMAP_TIMER_INT_EN_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_WAKEUP_EN_REG		(_OMAP_TIMER_WAKEUP_EN_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CTRL_REG			(_OMAP_TIMER_CTRL_OFFSET \
+							| (WP_TCLR << WPSHIFT))
+
+#define OMAP_TIMER_COUNTER_REG			(_OMAP_TIMER_COUNTER_OFFSET \
+							| (WP_TCRR << WPSHIFT))
+
+#define OMAP_TIMER_LOAD_REG			(_OMAP_TIMER_LOAD_OFFSET \
+							| (WP_TLDR << WPSHIFT))
+
+#define OMAP_TIMER_TRIGGER_REG			(_OMAP_TIMER_TRIGGER_OFFSET \
+							| (WP_TTGR << WPSHIFT))
+
+#define OMAP_TIMER_WRITE_PEND_REG		(_OMAP_TIMER_WRITE_PEND_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_MATCH_REG			(_OMAP_TIMER_MATCH_OFFSET \
+							| (WP_TMAR << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE_REG			(_OMAP_TIMER_CAPTURE_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_IF_CTRL_REG			(_OMAP_TIMER_IF_CTRL_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE2_REG			(_OMAP_TIMER_CAPTURE2_OFFSET \
+							| (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_TICK_POS_REG			(_OMAP_TIMER_TICK_POS_OFFSET \
+							| (WP_TPIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_NEG_REG			(_OMAP_TIMER_TICK_NEG_OFFSET \
+							| (WP_TNIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_COUNT_REG		(_OMAP_TIMER_TICK_COUNT_OFFSET \
+							| (WP_TCVR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
+		(_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
+		(_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
+
+struct omap_dm_timer {
+	unsigned long phys_base;
+	int irq;
+#ifdef CONFIG_ARCH_OMAP2PLUS
+	struct clk *iclk, *fclk;
+#endif
+	void __iomem *io_base;
+	unsigned long rate;
+	unsigned reserved:1;
+	unsigned enabled:1;
+	unsigned posted:1;
+};
+
+extern u32 sys_timer_reserved;
+void omap_dm_timer_prepare(struct omap_dm_timer *timer);
+
+static inline u32 __omap_dm_timer_read(void __iomem *base, u32 reg,
+						int posted)
+{
+	if (posted)
+		while (__raw_readl(base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
+				& (reg >> WPSHIFT))
+			cpu_relax();
+
+	return __raw_readl(base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_write(void __iomem *base, u32 reg, u32 val,
+						int posted)
+{
+	if (posted)
+		while (__raw_readl(base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
+				& (reg >> WPSHIFT))
+			cpu_relax();
+
+	__raw_writel(val, base + (reg & 0xff));
+}
+
+/* Assumes the source clock has been set by caller */
+static inline void __omap_dm_timer_reset(void __iomem *base, int autoidle,
+						int wakeup)
+{
+	u32 l;
+
+	l = __omap_dm_timer_read(base, OMAP_TIMER_OCP_CFG_REG, 0);
+	l |= 0x02 << 3;  /* Set to smart-idle mode */
+	l |= 0x2 << 8;   /* Set clock activity to preserve f-clock on idle */
+
+	if (autoidle)
+		l |= 0x1 << 0;
+
+	if (wakeup)
+		l |= 1 << 2;
+
+	__omap_dm_timer_write(base, OMAP_TIMER_OCP_CFG_REG, l, 0);
+
+	/* Match hardware reset default of posted mode */
+	__omap_dm_timer_write(base, OMAP_TIMER_IF_CTRL_REG,
+					OMAP_TIMER_CTRL_POSTED, 0);
+}
+
+static inline int __omap_dm_timer_set_source(struct clk *timer_fck,
+						struct clk *parent)
+{
+	int ret;
+
+	clk_disable(timer_fck);
+	ret = clk_set_parent(timer_fck, parent);
+	clk_enable(timer_fck);
+
+	/*
+	 * When the functional clock disappears, too quick writes seem
+	 * to cause an abort. XXX Is this still necessary?
+	 */
+	__delay(300000);
+
+	return ret;
+}
+
+static inline void __omap_dm_timer_stop(void __iomem *base, int posted,
+						unsigned long rate)
+{
+	u32 l;
+
+	l = __omap_dm_timer_read(base, OMAP_TIMER_CTRL_REG, posted);
+	if (l & OMAP_TIMER_CTRL_ST) {
+		l &= ~0x1;
+		__omap_dm_timer_write(base, OMAP_TIMER_CTRL_REG, l, posted);
+#ifdef CONFIG_ARCH_OMAP2PLUS
+		/* Readback to make sure write has completed */
+		__omap_dm_timer_read(base, OMAP_TIMER_CTRL_REG, posted);
+		/*
+		 * Wait for functional clock period x 3.5 to make sure that
+		 * timer is stopped
+		 */
+		udelay(3500000 / rate + 1);
+#endif
+	}
+
+	/* Ack possibly pending interrupt */
+	__omap_dm_timer_write(base, OMAP_TIMER_STAT_REG,
+				OMAP_TIMER_INT_OVERFLOW, 0);
+}
+
+static inline void __omap_dm_timer_load_start(void __iomem *base, u32 ctrl,
+						unsigned int load, int posted)
+{
+	__omap_dm_timer_write(base, OMAP_TIMER_COUNTER_REG, load, posted);
+	__omap_dm_timer_write(base, OMAP_TIMER_CTRL_REG, ctrl, posted);
+}
+
+static inline void __omap_dm_timer_int_enable(void __iomem *base,
+						unsigned int value)
+{
+	__omap_dm_timer_write(base, OMAP_TIMER_INT_EN_REG, value, 0);
+	__omap_dm_timer_write(base, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+}
+
+static inline unsigned int __omap_dm_timer_read_counter(void __iomem *base,
+						int posted)
+{
+	return __omap_dm_timer_read(base, OMAP_TIMER_COUNTER_REG, posted);
+}
+
+static inline void __omap_dm_timer_write_status(void __iomem *base,
+						unsigned int value)
+{
+	__omap_dm_timer_write(base, OMAP_TIMER_STAT_REG, value, 0);
+}
 
 #endif /* __ASM_ARCH_DMTIMER_H */
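
[Each *_REG constant above packs two facts into one u32: the MMIO offset in the low byte and, at bit WPSHIFT and above, the TWPS write-pending mask that posted-mode accesses must poll before touching the register. A throwaway illustration of the encoding; decode_reg() is not part of the API:]

    #include <linux/kernel.h>
    #include <plat/dmtimer.h>

    static void decode_reg(u32 reg)
    {
            u32 offset  = reg & 0xff;       /* MMIO offset, e.g. 0x28 for TCRR */
            u32 wp_mask = reg >> WPSHIFT;   /* write-pending bit, e.g. WP_TCRR */

            pr_info("offset 0x%02x, write-pend mask 0x%x\n", offset, wp_mask);
    }

    /* decode_reg(OMAP_TIMER_COUNTER_REG) prints: offset 0x28, mask 0x2 */
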
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index ec97e00cb58..91e8de3db08 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -174,12 +174,32 @@ struct omap_gpio_dev_attr {
 	bool dbck_flag;		/* dbck required or not - True for OMAP3&4 */
 };
 
+struct omap_gpio_reg_offs {
+	u16 revision;
+	u16 direction;
+	u16 datain;
+	u16 dataout;
+	u16 set_dataout;
+	u16 clr_dataout;
+	u16 irqstatus;
+	u16 irqstatus2;
+	u16 irqenable;
+	u16 set_irqenable;
+	u16 clr_irqenable;
+	u16 debounce;
+	u16 debounce_en;
+
+	bool irqenable_inv;
+};
+
 struct omap_gpio_platform_data {
 	u16 virtual_irq_start;
 	int bank_type;
 	int bank_width;		/* GPIO bank width */
 	int bank_stride;	/* Only needed for omap1 MPUIO */
 	bool dbck_flag;		/* dbck required or not - True for OMAP3&4 */
+
+	struct omap_gpio_reg_offs *regs;
 };
 
 /* TODO: Analyze removing gpio_bank_count usage from driver code */
diff --git a/arch/arm/plat-omap/include/plat/i2c.h b/arch/arm/plat-omap/include/plat/i2c.h
index 878d632c409..7c22b9e10dc 100644
--- a/arch/arm/plat-omap/include/plat/i2c.h
+++ b/arch/arm/plat-omap/include/plat/i2c.h
@@ -22,6 +22,7 @@
 #define __ASM__ARCH_OMAP_I2C_H
 
 #include <linux/i2c.h>
+#include <linux/i2c-omap.h>
 
 #if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
 extern int omap_register_i2c_bus(int bus_id, u32 clkrate,
@@ -46,10 +47,13 @@ static inline int omap_register_i2c_bus(int bus_id, u32 clkrate,
  */
 struct omap_i2c_dev_attr {
 	u8	fifo_depth;
-	u8	flags;
+	u32	flags;
 };
 
 void __init omap1_i2c_mux_pins(int bus_id);
 void __init omap2_i2c_mux_pins(int bus_id);
 
+struct omap_hwmod;
+int omap_i2c_reset(struct omap_hwmod *oh);
+
 #endif /* __ASM__ARCH_OMAP_I2C_H */
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 174f1b9c8c0..7a6ec98a08e 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -25,16 +25,17 @@ struct iotlb_entry {
 	};
 };
 
-struct iommu {
+struct omap_iommu {
 	const char	*name;
 	struct module	*owner;
 	struct clk	*clk;
 	void __iomem	*regbase;
 	struct device	*dev;
 	void		*isr_priv;
+	struct iommu_domain *domain;
 
 	unsigned int	refcount;
-	struct mutex	iommu_lock;	/* global for this whole object */
+	spinlock_t	iommu_lock;	/* global for this whole object */
 
 	/*
 	 * We don't change iopgd for a situation like pgd for a task,
@@ -48,8 +49,6 @@ struct iommu {
 	struct list_head	mmap;
 	struct mutex		mmap_lock; /* protect mmap */
 
-	int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, void *priv);
-
 	void *ctx; /* iommu context: registers saved area */
 	u32 da_start;
 	u32 da_end;
@@ -81,25 +80,27 @@ struct iotlb_lock {
 struct iommu_functions {
 	unsigned long	version;
 
-	int (*enable)(struct iommu *obj);
-	void (*disable)(struct iommu *obj);
-	void (*set_twl)(struct iommu *obj, bool on);
-	u32 (*fault_isr)(struct iommu *obj, u32 *ra);
+	int (*enable)(struct omap_iommu *obj);
+	void (*disable)(struct omap_iommu *obj);
+	void (*set_twl)(struct omap_iommu *obj, bool on);
+	u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
 
-	void (*tlb_read_cr)(struct iommu *obj, struct cr_regs *cr);
-	void (*tlb_load_cr)(struct iommu *obj, struct cr_regs *cr);
+	void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
+	void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
 
-	struct cr_regs *(*alloc_cr)(struct iommu *obj, struct iotlb_entry *e);
+	struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
+							struct iotlb_entry *e);
 	int (*cr_valid)(struct cr_regs *cr);
 	u32 (*cr_to_virt)(struct cr_regs *cr);
 	void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
-	ssize_t (*dump_cr)(struct iommu *obj, struct cr_regs *cr, char *buf);
+	ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
+							char *buf);
 
 	u32 (*get_pte_attr)(struct iotlb_entry *e);
 
-	void (*save_ctx)(struct iommu *obj);
-	void (*restore_ctx)(struct iommu *obj);
-	ssize_t (*dump_ctx)(struct iommu *obj, char *buf, ssize_t len);
+	void (*save_ctx)(struct omap_iommu *obj);
+	void (*restore_ctx)(struct omap_iommu *obj);
+	ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
 };
 
 struct iommu_platform_data {
@@ -150,40 +151,30 @@ struct iommu_platform_data {
 /*
  * global functions
  */
-extern u32 iommu_arch_version(void);
+extern u32 omap_iommu_arch_version(void);
 
-extern void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
-extern u32 iotlb_cr_to_virt(struct cr_regs *cr);
-
-extern int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e);
-extern void iommu_set_twl(struct iommu *obj, bool on);
-extern void flush_iotlb_page(struct iommu *obj, u32 da);
-extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end);
-extern void flush_iotlb_all(struct iommu *obj);
-
-extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e);
-extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd,
-				   u32 **ppte);
-extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
-
-extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end);
-extern struct iommu *iommu_get(const char *name);
-extern void iommu_put(struct iommu *obj);
-extern int iommu_set_isr(const char *name,
-		int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
+extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
+
+extern int
+omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
+
+extern int omap_iommu_set_isr(const char *name,
+		int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs,
 			   void *priv),
 		 void *isr_priv);
 
-extern void iommu_save_ctx(struct iommu *obj);
-extern void iommu_restore_ctx(struct iommu *obj);
+extern void omap_iommu_save_ctx(struct device *dev);
+extern void omap_iommu_restore_ctx(struct device *dev);
 
-extern int install_iommu_arch(const struct iommu_functions *ops);
-extern void uninstall_iommu_arch(const struct iommu_functions *ops);
+extern int omap_install_iommu_arch(const struct iommu_functions *ops);
+extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
 
-extern int foreach_iommu_device(void *data,
+extern int omap_foreach_iommu_device(void *data,
 				int (*fn)(struct device *, void *));
 
-extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len);
-extern size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t len);
+extern ssize_t
+omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
+extern size_t
+omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
 
 #endif /* __MACH_IOMMU_H */
diff --git a/arch/arm/plat-omap/include/plat/iommu2.h b/arch/arm/plat-omap/include/plat/iommu2.h
index 10ad05f410e..d4116b595e4 100644
--- a/arch/arm/plat-omap/include/plat/iommu2.h
+++ b/arch/arm/plat-omap/include/plat/iommu2.h
@@ -83,12 +83,12 @@
 /*
  * register accessors
  */
-static inline u32 iommu_read_reg(struct iommu *obj, size_t offs)
+static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs)
 {
 	return __raw_readl(obj->regbase + offs);
 }
 
-static inline void iommu_write_reg(struct iommu *obj, u32 val, size_t offs)
+static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
 {
 	__raw_writel(val, obj->regbase + offs);
 }
diff --git a/arch/arm/plat-omap/iopgtable.h b/arch/arm/plat-omap/include/plat/iopgtable.h
index c3e93bb0911..e0c56aafc2e 100644
--- a/arch/arm/plat-omap/iopgtable.h
+++ b/arch/arm/plat-omap/include/plat/iopgtable.h
@@ -97,6 +97,6 @@ static inline u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
 }
 
 #define to_iommu(dev)							\
-	(struct iommu *)platform_get_drvdata(to_platform_device(dev))
+	(struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
 
 #endif /* __PLAT_OMAP_IOMMU_H */
diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h
index e992b9655fb..498e57cda6c 100644
--- a/arch/arm/plat-omap/include/plat/iovmm.h
+++ b/arch/arm/plat-omap/include/plat/iovmm.h
@@ -13,8 +13,10 @@
 #ifndef __IOMMU_MMAP_H
 #define __IOMMU_MMAP_H
 
+#include <linux/iommu.h>
+
 struct iovm_struct {
-	struct iommu		*iommu;	/* iommu object which this belongs to */
+	struct omap_iommu	*iommu;	/* iommu object which this belongs to */
 	u32			 da_start; /* area definition */
 	u32			 da_end;
 	u32			 flags; /* IOVMF_: see below */
70#define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT)) 72#define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT))
71 73
72 74
73extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da); 75extern struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da);
74extern u32 iommu_vmap(struct iommu *obj, u32 da, 76extern u32
77omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
75 const struct sg_table *sgt, u32 flags); 78 const struct sg_table *sgt, u32 flags);
76extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da); 79extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,
77extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, 80 struct device *dev, u32 da);
78 u32 flags); 81extern u32
79extern void iommu_vfree(struct iommu *obj, const u32 da); 82omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev,
80extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, 83 u32 da, size_t bytes, u32 flags);
81 u32 flags); 84extern void
82extern void iommu_kunmap(struct iommu *obj, u32 da); 85omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
83extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, 86 const u32 da);
84 u32 flags); 87extern void *omap_da_to_va(struct device *dev, u32 da);
85extern void iommu_kfree(struct iommu *obj, u32 da);
86
87extern void *da_to_va(struct iommu *obj, u32 da);
88 88
89#endif /* __IOMMU_MMAP_H */ 89#endif /* __IOMMU_MMAP_H */
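
The virtual-memory helpers now take an iommu_domain/device pair rather than a struct iommu handle. A hedged sketch of allocating a device-visible buffer against the new signatures; the domain and dev arguments come from the client driver, and passing da == 0 with flags == 0 to let the allocator pick an address is an illustrative assumption:

	#include <linux/err.h>

	static void *alloc_dev_buffer(struct iommu_domain *domain,
				      struct device *dev, size_t bytes)
	{
		u32 da;

		da = omap_iommu_vmalloc(domain, dev, 0, bytes, 0);
		if (IS_ERR_VALUE(da))
			return NULL;

		return omap_da_to_va(dev, da);	/* CPU-visible address */
	}

The matching release is omap_iommu_vfree(domain, dev, da); the kmap/kmalloc variants of the old API are gone entirely.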
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 5a25098ea7e..30e10719b77 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -357,6 +357,7 @@
357#define INT_35XX_EMAC_C0_TX_PULSE_IRQ 69 357#define INT_35XX_EMAC_C0_TX_PULSE_IRQ 69
358#define INT_35XX_EMAC_C0_MISC_PULSE_IRQ 70 358#define INT_35XX_EMAC_C0_MISC_PULSE_IRQ 70
359#define INT_35XX_USBOTG_IRQ 71 359#define INT_35XX_USBOTG_IRQ 71
360#define INT_35XX_UART4 84
360#define INT_35XX_CCDC_VD0_IRQ 88 361#define INT_35XX_CCDC_VD0_IRQ 88
361#define INT_35XX_CCDC_VD1_IRQ 92 362#define INT_35XX_CCDC_VD1_IRQ 92
362#define INT_35XX_CCDC_VD2_IRQ 93 363#define INT_35XX_CCDC_VD2_IRQ 93
@@ -407,11 +408,19 @@
407#endif 408#endif
408#define TWL6030_IRQ_END (TWL6030_IRQ_BASE + TWL6030_BASE_NR_IRQS) 409#define TWL6030_IRQ_END (TWL6030_IRQ_BASE + TWL6030_BASE_NR_IRQS)
409 410
411#define TWL6040_CODEC_IRQ_BASE TWL6030_IRQ_END
412#ifdef CONFIG_TWL6040_CODEC
413#define TWL6040_CODEC_NR_IRQS 6
414#else
415#define TWL6040_CODEC_NR_IRQS 0
416#endif
417#define TWL6040_CODEC_IRQ_END (TWL6040_CODEC_IRQ_BASE + TWL6040_CODEC_NR_IRQS)
418
410/* Total number of interrupts depends on the enabled blocks above */ 419/* Total number of interrupts depends on the enabled blocks above */
411#if (TWL4030_GPIO_IRQ_END > TWL6030_IRQ_END) 420#if (TWL4030_GPIO_IRQ_END > TWL6040_CODEC_IRQ_END)
412#define TWL_IRQ_END TWL4030_GPIO_IRQ_END 421#define TWL_IRQ_END TWL4030_GPIO_IRQ_END
413#else 422#else
414#define TWL_IRQ_END TWL6030_IRQ_END 423#define TWL_IRQ_END TWL6040_CODEC_IRQ_END
415#endif 424#endif
416 425
417/* GPMC related */ 426/* GPMC related */
@@ -428,7 +437,11 @@
428#define INTCPS_NR_IRQS 96 437#define INTCPS_NR_IRQS 96
429 438
430#ifndef __ASSEMBLY__ 439#ifndef __ASSEMBLY__
431extern void omap_init_irq(void); 440extern void __iomem *omap_irq_base;
441void omap1_init_irq(void);
442void omap2_init_irq(void);
443void omap3_init_irq(void);
444void ti816x_init_irq(void);
432extern int omap_irq_pending(void); 445extern int omap_irq_pending(void);
433void omap_intc_save_context(void); 446void omap_intc_save_context(void);
434void omap_intc_restore_context(void); 447void omap_intc_restore_context(void);
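
omap_init_irq() is replaced by per-SoC entry points so each family can register its own interrupt controller setup. A sketch of the board-side change, assuming an OMAP3 board whose machine record previously pointed at omap_init_irq():

	/* illustrative machine hook: pick the init matching the SoC */
	static void __init myboard_init_irq(void)
	{
		omap3_init_irq();	/* was: omap_init_irq() */
	}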
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index f8f690ab299..9882c657b2d 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -24,7 +24,6 @@
24#ifndef __ASM_ARCH_OMAP_MCBSP_H 24#ifndef __ASM_ARCH_OMAP_MCBSP_H
25#define __ASM_ARCH_OMAP_MCBSP_H 25#define __ASM_ARCH_OMAP_MCBSP_H
26 26
27#include <linux/completion.h>
28#include <linux/spinlock.h> 27#include <linux/spinlock.h>
29 28
30#include <mach/hardware.h> 29#include <mach/hardware.h>
@@ -34,7 +33,7 @@
34#define OMAP_MCBSP_PLATFORM_DEVICE(port_nr) \ 33#define OMAP_MCBSP_PLATFORM_DEVICE(port_nr) \
35static struct platform_device omap_mcbsp##port_nr = { \ 34static struct platform_device omap_mcbsp##port_nr = { \
36 .name = "omap-mcbsp-dai", \ 35 .name = "omap-mcbsp-dai", \
37 .id = OMAP_MCBSP##port_nr, \ 36 .id = port_nr - 1, \
38} 37}
39 38
40#define MCBSP_CONFIG_TYPE2 0x2 39#define MCBSP_CONFIG_TYPE2 0x2
@@ -333,18 +332,6 @@ struct omap_mcbsp_reg_cfg {
333}; 332};
334 333
335typedef enum { 334typedef enum {
336 OMAP_MCBSP1 = 0,
337 OMAP_MCBSP2,
338 OMAP_MCBSP3,
339 OMAP_MCBSP4,
340 OMAP_MCBSP5
341} omap_mcbsp_id;
342
343typedef int __bitwise omap_mcbsp_io_type_t;
344#define OMAP_MCBSP_IRQ_IO ((__force omap_mcbsp_io_type_t) 1)
345#define OMAP_MCBSP_POLL_IO ((__force omap_mcbsp_io_type_t) 2)
346
347typedef enum {
348 OMAP_MCBSP_WORD_8 = 0, 335 OMAP_MCBSP_WORD_8 = 0,
349 OMAP_MCBSP_WORD_12, 336 OMAP_MCBSP_WORD_12,
350 OMAP_MCBSP_WORD_16, 337 OMAP_MCBSP_WORD_16,
@@ -353,38 +340,6 @@ typedef enum {
353 OMAP_MCBSP_WORD_32, 340 OMAP_MCBSP_WORD_32,
354} omap_mcbsp_word_length; 341} omap_mcbsp_word_length;
355 342
356typedef enum {
357 OMAP_MCBSP_CLK_RISING = 0,
358 OMAP_MCBSP_CLK_FALLING,
359} omap_mcbsp_clk_polarity;
360
361typedef enum {
362 OMAP_MCBSP_FS_ACTIVE_HIGH = 0,
363 OMAP_MCBSP_FS_ACTIVE_LOW,
364} omap_mcbsp_fs_polarity;
365
366typedef enum {
367 OMAP_MCBSP_CLK_STP_MODE_NO_DELAY = 0,
368 OMAP_MCBSP_CLK_STP_MODE_DELAY,
369} omap_mcbsp_clk_stp_mode;
370
371
372/******* SPI specific mode **********/
373typedef enum {
374 OMAP_MCBSP_SPI_MASTER = 0,
375 OMAP_MCBSP_SPI_SLAVE,
376} omap_mcbsp_spi_mode;
377
378struct omap_mcbsp_spi_cfg {
379 omap_mcbsp_spi_mode spi_mode;
380 omap_mcbsp_clk_polarity rx_clock_polarity;
381 omap_mcbsp_clk_polarity tx_clock_polarity;
382 omap_mcbsp_fs_polarity fsx_polarity;
383 u8 clk_div;
384 omap_mcbsp_clk_stp_mode clk_stp_mode;
385 omap_mcbsp_word_length word_length;
386};
387
388/* Platform specific configuration */ 343/* Platform specific configuration */
389struct omap_mcbsp_ops { 344struct omap_mcbsp_ops {
390 void (*request)(unsigned int); 345 void (*request)(unsigned int);
@@ -422,25 +377,13 @@ struct omap_mcbsp {
422 void __iomem *io_base; 377 void __iomem *io_base;
423 u8 id; 378 u8 id;
424 u8 free; 379 u8 free;
425 omap_mcbsp_word_length rx_word_length;
426 omap_mcbsp_word_length tx_word_length;
427 380
428 omap_mcbsp_io_type_t io_type; /* IRQ or poll */
429 /* IRQ based TX/RX */
430 int rx_irq; 381 int rx_irq;
431 int tx_irq; 382 int tx_irq;
432 383
433 /* DMA stuff */ 384 /* DMA stuff */
434 u8 dma_rx_sync; 385 u8 dma_rx_sync;
435 short dma_rx_lch;
436 u8 dma_tx_sync; 386 u8 dma_tx_sync;
437 short dma_tx_lch;
438
439 /* Completion queues */
440 struct completion tx_irq_completion;
441 struct completion rx_irq_completion;
442 struct completion tx_dma_completion;
443 struct completion rx_dma_completion;
444 387
445 /* Protect the field .free, while checking if the mcbsp is in use */ 388 /* Protect the field .free, while checking if the mcbsp is in use */
446 spinlock_t lock; 389 spinlock_t lock;
@@ -499,24 +442,9 @@ int omap_mcbsp_request(unsigned int id);
499void omap_mcbsp_free(unsigned int id); 442void omap_mcbsp_free(unsigned int id);
500void omap_mcbsp_start(unsigned int id, int tx, int rx); 443void omap_mcbsp_start(unsigned int id, int tx, int rx);
501void omap_mcbsp_stop(unsigned int id, int tx, int rx); 444void omap_mcbsp_stop(unsigned int id, int tx, int rx);
502void omap_mcbsp_xmit_word(unsigned int id, u32 word);
503u32 omap_mcbsp_recv_word(unsigned int id);
504
505int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int length);
506int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int length);
507int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word);
508int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 * word);
509
510 445
511/* McBSP functional clock source changing function */ 446/* McBSP functional clock source changing function */
512extern int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id); 447extern int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id);
513/* SPI specific API */
514void omap_mcbsp_set_spi_mode(unsigned int id, const struct omap_mcbsp_spi_cfg * spi_cfg);
515
516/* Polled read/write functions */
517int omap_mcbsp_pollread(unsigned int id, u16 * buf);
518int omap_mcbsp_pollwrite(unsigned int id, u16 buf);
519int omap_mcbsp_set_io_type(unsigned int id, omap_mcbsp_io_type_t io_type);
520 448
521/* McBSP signal muxing API */ 449/* McBSP signal muxing API */
522void omap2_mcbsp1_mux_clkr_src(u8 mux); 450void omap2_mcbsp1_mux_clkr_src(u8 mux);
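
With the OMAP_MCBSP1..5 enum removed, the platform-device id comes straight from the port number. What the macro now expands to for port 2, shown as a comment for reference:

	OMAP_MCBSP_PLATFORM_DEVICE(2);
	/*
	 * expands to:
	 *	static struct platform_device omap_mcbsp2 = {
	 *		.name	= "omap-mcbsp-dai",
	 *		.id	= 1,	// i.e. port_nr - 1, was OMAP_MCBSP2
	 *	};
	 */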
diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h
index d86d1ecf006..67fc5060183 100644
--- a/arch/arm/plat-omap/include/plat/nand.h
+++ b/arch/arm/plat-omap/include/plat/nand.h
@@ -19,15 +19,11 @@ enum nand_io {
19}; 19};
20 20
21struct omap_nand_platform_data { 21struct omap_nand_platform_data {
22 unsigned int options;
23 int cs; 22 int cs;
24 int gpio_irq;
25 struct mtd_partition *parts; 23 struct mtd_partition *parts;
26 struct gpmc_timings *gpmc_t; 24 struct gpmc_timings *gpmc_t;
27 int nr_parts; 25 int nr_parts;
28 int (*nand_setup)(void); 26 bool dev_ready;
29 int (*dev_ready)(struct omap_nand_platform_data *);
30 int dma_channel;
31 int gpmc_irq; 27 int gpmc_irq;
32 enum nand_io xfer_type; 28 enum nand_io xfer_type;
33 unsigned long phys_base; 29 unsigned long phys_base;
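
Board files now pass a plain bool for ready/busy detection instead of a dev_ready() callback, and the options/gpio_irq/dma_channel/nand_setup knobs are gone. A hedged board-side sketch against the slimmed structure; the partition layout, chip-select number, and the NAND_OMAP_PREFETCH_DMA transfer type (an enum nand_io value assumed from this header) are illustrative:

	static struct mtd_partition myboard_nand_parts[] = {
		{ .name = "bootloader", .offset = 0, .size = SZ_512K, },
	};

	static struct omap_nand_platform_data myboard_nand_data = {
		.cs		= 0,
		.parts		= myboard_nand_parts,
		.nr_parts	= ARRAY_SIZE(myboard_nand_parts),
		.dev_ready	= true,		/* R/B pin wired to the GPMC */
		.xfer_type	= NAND_OMAP_PREFETCH_DMA,
	};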
diff --git a/arch/arm/plat-omap/include/plat/omap-pm.h b/arch/arm/plat-omap/include/plat/omap-pm.h
index c0a75205303..0840df813f4 100644
--- a/arch/arm/plat-omap/include/plat/omap-pm.h
+++ b/arch/arm/plat-omap/include/plat/omap-pm.h
@@ -40,11 +40,7 @@
40 * framework starts. The "_if_" is to avoid name collisions with the 40 * framework starts. The "_if_" is to avoid name collisions with the
41 * PM idle-loop code. 41 * PM idle-loop code.
42 */ 42 */
43#ifdef CONFIG_OMAP_PM_NONE
44#define omap_pm_if_early_init() 0
45#else
46int __init omap_pm_if_early_init(void); 43int __init omap_pm_if_early_init(void);
47#endif
48 44
49/** 45/**
50 * omap_pm_if_init - OMAP PM init code called after clock fw init 46 * omap_pm_if_init - OMAP PM init code called after clock fw init
@@ -52,11 +48,7 @@ int __init omap_pm_if_early_init(void);
52 * The main initialization code. OPP tables are passed in here. The 48 * The main initialization code. OPP tables are passed in here. The
53 * "_if_" is to avoid name collisions with the PM idle-loop code. 49 * "_if_" is to avoid name collisions with the PM idle-loop code.
54 */ 50 */
55#ifdef CONFIG_OMAP_PM_NONE
56#define omap_pm_if_init() 0
57#else
58int __init omap_pm_if_init(void); 51int __init omap_pm_if_init(void);
59#endif
60 52
61/** 53/**
62 * omap_pm_if_exit - OMAP PM exit code 54 * omap_pm_if_exit - OMAP PM exit code
diff --git a/arch/arm/plat-omap/include/plat/omap4-keypad.h b/arch/arm/plat-omap/include/plat/omap4-keypad.h
index 2b1d9bc1eeb..9fe6c878323 100644
--- a/arch/arm/plat-omap/include/plat/omap4-keypad.h
+++ b/arch/arm/plat-omap/include/plat/omap4-keypad.h
@@ -10,5 +10,6 @@ struct omap4_keypad_platform_data {
10 u8 cols; 10 u8 cols;
11}; 11};
12 12
13extern int omap4_keyboard_init(struct omap4_keypad_platform_data *); 13extern int omap4_keyboard_init(struct omap4_keypad_platform_data *,
14 struct omap_board_data *);
14#endif 15#endif
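
A sketch of the adjusted call site: board code now hands over pad/mux data alongside the keymap. The keymap_data field and both board structures are illustrative assumptions:

	static struct omap_board_data myboard_keypad_bdata;	/* pads/mux */

	static struct omap4_keypad_platform_data myboard_keypad_data = {
		.keymap_data	= &myboard_keymap_data,	/* assumed field */
		.rows		= 8,
		.cols		= 8,
	};

	static int __init myboard_keyboard_init(void)
	{
		return omap4_keyboard_init(&myboard_keypad_data,
					   &myboard_keypad_bdata);
	}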
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index e4c349ff9fd..ee405b36df4 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -44,6 +44,10 @@ extern struct device omap_device_parent;
44#define OMAP_DEVICE_STATE_IDLE 2 44#define OMAP_DEVICE_STATE_IDLE 2
45#define OMAP_DEVICE_STATE_SHUTDOWN 3 45#define OMAP_DEVICE_STATE_SHUTDOWN 3
46 46
47/* omap_device.flags values */
48#define OMAP_DEVICE_SUSPENDED BIT(0)
49#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND BIT(1)
50
47/** 51/**
48 * struct omap_device - omap_device wrapper for platform_devices 52 * struct omap_device - omap_device wrapper for platform_devices
49 * @pdev: platform_device 53 * @pdev: platform_device
@@ -73,6 +77,7 @@ struct omap_device {
73 s8 pm_lat_level; 77 s8 pm_lat_level;
74 u8 hwmods_cnt; 78 u8 hwmods_cnt;
75 u8 _state; 79 u8 _state;
80 u8 flags;
76}; 81};
77 82
78/* Device driver interface (call via platform_data fn ptrs) */ 83/* Device driver interface (call via platform_data fn ptrs) */
@@ -117,6 +122,10 @@ int omap_device_enable_hwmods(struct omap_device *od);
117int omap_device_disable_clocks(struct omap_device *od); 122int omap_device_disable_clocks(struct omap_device *od);
118int omap_device_enable_clocks(struct omap_device *od); 123int omap_device_enable_clocks(struct omap_device *od);
119 124
125static inline void omap_device_disable_idle_on_suspend(struct omap_device *od)
126{
127 od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND;
128}
120 129
121/* 130/*
122 * Entries should be kept in latency order ascending 131 * Entries should be kept in latency order ascending
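
The helper above is the only intended writer of the new flags field. A minimal sketch of the expected use; how the caller obtains the omap_device from the core is left out here:

	static void keep_active_in_suspend(struct omap_device *od)
	{
		/* sets OMAP_DEVICE_NO_IDLE_ON_SUSPEND in od->flags */
		omap_device_disable_idle_on_suspend(od);
	}

A typical user would be a console UART that must keep running while the rest of the system suspends.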
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 1adea9c6298..0e329ca88a7 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -2,6 +2,7 @@
2 * omap_hwmod macros, structures 2 * omap_hwmod macros, structures
3 * 3 *
4 * Copyright (C) 2009-2011 Nokia Corporation 4 * Copyright (C) 2009-2011 Nokia Corporation
5 * Copyright (C) 2011 Texas Instruments, Inc.
5 * Paul Walmsley 6 * Paul Walmsley
6 * 7 *
7 * Created in collaboration with (alphabetical order): Benoît Cousson, 8 * Created in collaboration with (alphabetical order): Benoît Cousson,
@@ -77,9 +78,13 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
77#define HWMOD_IDLEMODE_FORCE (1 << 0) 78#define HWMOD_IDLEMODE_FORCE (1 << 0)
78#define HWMOD_IDLEMODE_NO (1 << 1) 79#define HWMOD_IDLEMODE_NO (1 << 1)
79#define HWMOD_IDLEMODE_SMART (1 << 2) 80#define HWMOD_IDLEMODE_SMART (1 << 2)
80/* Slave idle mode flag only */
81#define HWMOD_IDLEMODE_SMART_WKUP (1 << 3) 81#define HWMOD_IDLEMODE_SMART_WKUP (1 << 3)
82 82
83/* modulemode control type (SW or HW) */
84#define MODULEMODE_HWCTRL 1
85#define MODULEMODE_SWCTRL 2
86
87
83/** 88/**
84 * struct omap_hwmod_mux_info - hwmod specific mux configuration 89 * struct omap_hwmod_mux_info - hwmod specific mux configuration
85 * @pads: array of omap_device_pad entries 90 * @pads: array of omap_device_pad entries
@@ -98,7 +103,7 @@ struct omap_hwmod_mux_info {
98/** 103/**
99 * struct omap_hwmod_irq_info - MPU IRQs used by the hwmod 104 * struct omap_hwmod_irq_info - MPU IRQs used by the hwmod
100 * @name: name of the IRQ channel (module local name) 105 * @name: name of the IRQ channel (module local name)
101 * @irq_ch: IRQ channel ID 106 * @irq: IRQ channel ID (should be non-negative except -1 = terminator)
102 * 107 *
103 * @name should be something short, e.g., "tx" or "rx". It is for use 108 * @name should be something short, e.g., "tx" or "rx". It is for use
104 * by platform_get_resource_byname(). It is defined locally to the 109 * by platform_get_resource_byname(). It is defined locally to the
@@ -106,13 +111,13 @@ struct omap_hwmod_mux_info {
106 */ 111 */
107struct omap_hwmod_irq_info { 112struct omap_hwmod_irq_info {
108 const char *name; 113 const char *name;
109 u16 irq; 114 s16 irq;
110}; 115};
111 116
112/** 117/**
113 * struct omap_hwmod_dma_info - DMA channels used by the hwmod 118 * struct omap_hwmod_dma_info - DMA channels used by the hwmod
114 * @name: name of the DMA channel (module local name) 119 * @name: name of the DMA channel (module local name)
115 * @dma_req: DMA request ID 120 * @dma_req: DMA request ID (should be non-negative except -1 = terminator)
116 * 121 *
117 * @name should be something short, e.g., "tx" or "rx". It is for use 122 * @name should be something short, e.g., "tx" or "rx". It is for use
118 * by platform_get_resource_byname(). It is defined locally to the 123 * by platform_get_resource_byname(). It is defined locally to the
@@ -120,7 +125,7 @@ struct omap_hwmod_irq_info {
120 */ 125 */
121struct omap_hwmod_dma_info { 126struct omap_hwmod_dma_info {
122 const char *name; 127 const char *name;
123 u16 dma_req; 128 s16 dma_req;
124}; 129};
125 130
126/** 131/**
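
Widening the irq/dma IDs from u16 to s16 lets hwmod data arrays carry their own -1 terminator instead of separate *_cnt fields (removed further down). A sketch of the resulting convention; names and numbers are illustrative:

	static struct omap_hwmod_irq_info myip_mpu_irqs[] = {
		{ .name = "tx", .irq = 26 },
		{ .name = "rx", .irq = 27 },
		{ .irq = -1 },			/* terminator */
	};

	static struct omap_hwmod_dma_info myip_sdma_reqs[] = {
		{ .name = "tx", .dma_req = 49 },
		{ .name = "rx", .dma_req = 50 },
		{ .dma_req = -1 },		/* terminator */
	};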
@@ -220,7 +225,6 @@ struct omap_hwmod_addr_space {
220 * @clk: interface clock: OMAP clock name 225 * @clk: interface clock: OMAP clock name
221 * @_clk: pointer to the interface struct clk (filled in at runtime) 226 * @_clk: pointer to the interface struct clk (filled in at runtime)
222 * @fw: interface firewall data 227 * @fw: interface firewall data
223 * @addr_cnt: ARRAY_SIZE(@addr)
224 * @width: OCP data width 228 * @width: OCP data width
225 * @user: initiators using this interface (see OCP_USER_* macros above) 229 * @user: initiators using this interface (see OCP_USER_* macros above)
226 * @flags: OCP interface flags (see OCPIF_* macros above) 230 * @flags: OCP interface flags (see OCPIF_* macros above)
@@ -239,7 +243,6 @@ struct omap_hwmod_ocp_if {
239 union { 243 union {
240 struct omap_hwmod_omap2_firewall omap2; 244 struct omap_hwmod_omap2_firewall omap2;
241 } fw; 245 } fw;
242 u8 addr_cnt;
243 u8 width; 246 u8 width;
244 u8 user; 247 u8 user;
245 u8 flags; 248 u8 flags;
@@ -258,6 +261,7 @@ struct omap_hwmod_ocp_if {
258#define MSTANDBY_FORCE (HWMOD_IDLEMODE_FORCE << MASTER_STANDBY_SHIFT) 261#define MSTANDBY_FORCE (HWMOD_IDLEMODE_FORCE << MASTER_STANDBY_SHIFT)
259#define MSTANDBY_NO (HWMOD_IDLEMODE_NO << MASTER_STANDBY_SHIFT) 262#define MSTANDBY_NO (HWMOD_IDLEMODE_NO << MASTER_STANDBY_SHIFT)
260#define MSTANDBY_SMART (HWMOD_IDLEMODE_SMART << MASTER_STANDBY_SHIFT) 263#define MSTANDBY_SMART (HWMOD_IDLEMODE_SMART << MASTER_STANDBY_SHIFT)
264#define MSTANDBY_SMART_WKUP (HWMOD_IDLEMODE_SMART_WKUP << MASTER_STANDBY_SHIFT)
261 265
262/* omap_hwmod_sysconfig.sysc_flags capability flags */ 266/* omap_hwmod_sysconfig.sysc_flags capability flags */
263#define SYSC_HAS_AUTOIDLE (1 << 0) 267#define SYSC_HAS_AUTOIDLE (1 << 0)
@@ -362,9 +366,11 @@ struct omap_hwmod_omap2_prcm {
362 * @submodule_wkdep_bit: bit shift of the WKDEP range 366 * @submodule_wkdep_bit: bit shift of the WKDEP range
363 */ 367 */
364struct omap_hwmod_omap4_prcm { 368struct omap_hwmod_omap4_prcm {
365 void __iomem *clkctrl_reg; 369 u16 clkctrl_offs;
366 void __iomem *rstctrl_reg; 370 u16 rstctrl_offs;
371 u16 context_offs;
367 u8 submodule_wkdep_bit; 372 u8 submodule_wkdep_bit;
373 u8 modulemode;
368}; 374};
369 375
370 376
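
Storing u16 offsets instead of pre-computed __iomem pointers keeps the PRCM data position-independent; the register address is formed at runtime from the partition base. A hedged sketch of both halves (the offsets, the cm_base pointer, and reading via __raw_readl directly rather than going through the CM instance helpers are illustrative assumptions):

	static struct omap_hwmod_omap4_prcm myip_prcm = {
		.clkctrl_offs	= 0x0020,
		.context_offs	= 0x0024,
		.modulemode	= MODULEMODE_SWCTRL,	/* SW-managed module */
	};

	static u32 myip_read_clkctrl(void __iomem *cm_base)
	{
		return __raw_readl(cm_base + myip_prcm.clkctrl_offs);
	}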
@@ -468,8 +474,8 @@ struct omap_hwmod_class {
468 * @name: name of the hwmod 474 * @name: name of the hwmod
469 * @class: struct omap_hwmod_class * to the class of this hwmod 475 * @class: struct omap_hwmod_class * to the class of this hwmod
470 * @od: struct omap_device currently associated with this hwmod (internal use) 476 * @od: struct omap_device currently associated with this hwmod (internal use)
471 * @mpu_irqs: ptr to an array of MPU IRQs (see also mpu_irqs_cnt) 477 * @mpu_irqs: ptr to an array of MPU IRQs
472 * @sdma_reqs: ptr to an array of System DMA request IDs (see sdma_reqs_cnt) 478 * @sdma_reqs: ptr to an array of System DMA request IDs
473 * @prcm: PRCM data pertaining to this hwmod 479 * @prcm: PRCM data pertaining to this hwmod
474 * @main_clk: main clock: OMAP clock name 480 * @main_clk: main clock: OMAP clock name
475 * @_clk: pointer to the main struct clk (filled in at runtime) 481 * @_clk: pointer to the main struct clk (filled in at runtime)
@@ -482,8 +488,6 @@ struct omap_hwmod_class {
482 * @_sysc_cache: internal-use hwmod flags 488 * @_sysc_cache: internal-use hwmod flags
483 * @_mpu_rt_va: cached register target start address (internal use) 489 * @_mpu_rt_va: cached register target start address (internal use)
484 * @_mpu_port_index: cached MPU register target slave ID (internal use) 490 * @_mpu_port_index: cached MPU register target slave ID (internal use)
485 * @mpu_irqs_cnt: number of @mpu_irqs
486 * @sdma_reqs_cnt: number of @sdma_reqs
487 * @opt_clks_cnt: number of @opt_clks 491 * @opt_clks_cnt: number of @opt_clks
488 * @master_cnt: number of @master entries 492 * @master_cnt: number of @master entries
489 * @slaves_cnt: number of @slave entries 493 * @slaves_cnt: number of @slave entries
@@ -519,6 +523,8 @@ struct omap_hwmod {
519 const char *main_clk; 523 const char *main_clk;
520 struct clk *_clk; 524 struct clk *_clk;
521 struct omap_hwmod_opt_clk *opt_clks; 525 struct omap_hwmod_opt_clk *opt_clks;
526 char *clkdm_name;
527 struct clockdomain *clkdm;
522 char *vdd_name; 528 char *vdd_name;
523 struct voltagedomain *voltdm; 529 struct voltagedomain *voltdm;
524 struct omap_hwmod_ocp_if **masters; /* connect to *_IA */ 530 struct omap_hwmod_ocp_if **masters; /* connect to *_IA */
@@ -531,8 +537,6 @@ struct omap_hwmod {
531 u16 flags; 537 u16 flags;
532 u8 _mpu_port_index; 538 u8 _mpu_port_index;
533 u8 response_lat; 539 u8 response_lat;
534 u8 mpu_irqs_cnt;
535 u8 sdma_reqs_cnt;
536 u8 rst_lines_cnt; 540 u8 rst_lines_cnt;
537 u8 opt_clks_cnt; 541 u8 opt_clks_cnt;
538 u8 masters_cnt; 542 u8 masters_cnt;
@@ -572,6 +576,7 @@ void omap_hwmod_ocp_barrier(struct omap_hwmod *oh);
572 576
573void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs); 577void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs);
574u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs); 578u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs);
579int omap_hwmod_softreset(struct omap_hwmod *oh);
575 580
576int omap_hwmod_count_resources(struct omap_hwmod *oh); 581int omap_hwmod_count_resources(struct omap_hwmod *oh);
577int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res); 582int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res);
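
A sketch of the new softreset hook from a caller's perspective, assuming the long-standing omap_hwmod_lookup() by-name lookup; the IP-block name is illustrative:

	static int myboard_reset_ip(void)
	{
		struct omap_hwmod *oh = omap_hwmod_lookup("mcbsp2");

		if (!oh)
			return -ENODEV;
		return omap_hwmod_softreset(oh);	/* new in this series */
	}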
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 2723f9166ea..de3b10c1812 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -56,6 +56,9 @@
56#define TI816X_UART2_BASE 0x48022000 56#define TI816X_UART2_BASE 0x48022000
57#define TI816X_UART3_BASE 0x48024000 57#define TI816X_UART3_BASE 0x48024000
58 58
59/* AM3505/3517 UART4 */
60#define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */
61
59/* External port on Zoom2/3 */ 62/* External port on Zoom2/3 */
60#define ZOOM_UART_BASE 0x10000000 63#define ZOOM_UART_BASE 0x10000000
61#define ZOOM_UART_VIRT 0xfa400000 64#define ZOOM_UART_VIRT 0xfa400000
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index ac4b60d9aa2..a067484cc4a 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -148,6 +148,7 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
148 /* omap3 based boards using UART3 */ 148 /* omap3 based boards using UART3 */
149 DEBUG_LL_OMAP3(3, cm_t35); 149 DEBUG_LL_OMAP3(3, cm_t35);
150 DEBUG_LL_OMAP3(3, cm_t3517); 150 DEBUG_LL_OMAP3(3, cm_t3517);
151 DEBUG_LL_OMAP3(3, cm_t3730);
151 DEBUG_LL_OMAP3(3, craneboard); 152 DEBUG_LL_OMAP3(3, craneboard);
152 DEBUG_LL_OMAP3(3, devkit8000); 153 DEBUG_LL_OMAP3(3, devkit8000);
153 DEBUG_LL_OMAP3(3, igep0020); 154 DEBUG_LL_OMAP3(3, igep0020);
diff --git a/arch/arm/plat-omap/iommu-debug.c b/arch/arm/plat-omap/iommu-debug.c
deleted file mode 100644
index f07cf2f08e0..00000000000
--- a/arch/arm/plat-omap/iommu-debug.c
+++ /dev/null
@@ -1,418 +0,0 @@
1/*
2 * omap iommu: debugfs interface
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/uaccess.h>
18#include <linux/platform_device.h>
19#include <linux/debugfs.h>
20
21#include <plat/iommu.h>
22#include <plat/iovmm.h>
23
24#include "iopgtable.h"
25
26#define MAXCOLUMN 100 /* for short messages */
27
28static DEFINE_MUTEX(iommu_debug_lock);
29
30static struct dentry *iommu_debug_root;
31
32static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
33 size_t count, loff_t *ppos)
34{
35 u32 ver = iommu_arch_version();
36 char buf[MAXCOLUMN], *p = buf;
37
 38 p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf, ver & 0xf);
39
40 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
41}
42
43static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
44 size_t count, loff_t *ppos)
45{
46 struct iommu *obj = file->private_data;
47 char *p, *buf;
48 ssize_t bytes;
49
50 buf = kmalloc(count, GFP_KERNEL);
51 if (!buf)
52 return -ENOMEM;
53 p = buf;
54
55 mutex_lock(&iommu_debug_lock);
56
57 bytes = iommu_dump_ctx(obj, p, count);
58 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
59
60 mutex_unlock(&iommu_debug_lock);
61 kfree(buf);
62
63 return bytes;
64}
65
66static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
67 size_t count, loff_t *ppos)
68{
69 struct iommu *obj = file->private_data;
70 char *p, *buf;
71 ssize_t bytes, rest;
72
73 buf = kmalloc(count, GFP_KERNEL);
74 if (!buf)
75 return -ENOMEM;
76 p = buf;
77
78 mutex_lock(&iommu_debug_lock);
79
80 p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
81 p += sprintf(p, "-----------------------------------------\n");
82 rest = count - (p - buf);
83 p += dump_tlb_entries(obj, p, rest);
84
85 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
86
87 mutex_unlock(&iommu_debug_lock);
88 kfree(buf);
89
90 return bytes;
91}
92
93static ssize_t debug_write_pagetable(struct file *file,
94 const char __user *userbuf, size_t count, loff_t *ppos)
95{
96 struct iotlb_entry e;
97 struct cr_regs cr;
98 int err;
99 struct iommu *obj = file->private_data;
100 char buf[MAXCOLUMN], *p = buf;
101
102 count = min(count, sizeof(buf));
103
104 mutex_lock(&iommu_debug_lock);
105 if (copy_from_user(p, userbuf, count)) {
106 mutex_unlock(&iommu_debug_lock);
107 return -EFAULT;
108 }
109
110 sscanf(p, "%x %x", &cr.cam, &cr.ram);
111 if (!cr.cam || !cr.ram) {
112 mutex_unlock(&iommu_debug_lock);
113 return -EINVAL;
114 }
115
116 iotlb_cr_to_e(&cr, &e);
117 err = iopgtable_store_entry(obj, &e);
118 if (err)
119 dev_err(obj->dev, "%s: fail to store cr\n", __func__);
120
121 mutex_unlock(&iommu_debug_lock);
122 return count;
123}
124
125#define dump_ioptable_entry_one(lv, da, val) \
126 ({ \
127 int __err = 0; \
128 ssize_t bytes; \
129 const int maxcol = 22; \
130 const char *str = "%d: %08x %08x\n"; \
131 bytes = snprintf(p, maxcol, str, lv, da, val); \
132 p += bytes; \
133 len -= bytes; \
134 if (len < maxcol) \
135 __err = -ENOMEM; \
136 __err; \
137 })
138
139static ssize_t dump_ioptable(struct iommu *obj, char *buf, ssize_t len)
140{
141 int i;
142 u32 *iopgd;
143 char *p = buf;
144
145 spin_lock(&obj->page_table_lock);
146
147 iopgd = iopgd_offset(obj, 0);
148 for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
149 int j, err;
150 u32 *iopte;
151 u32 da;
152
153 if (!*iopgd)
154 continue;
155
156 if (!(*iopgd & IOPGD_TABLE)) {
157 da = i << IOPGD_SHIFT;
158
159 err = dump_ioptable_entry_one(1, da, *iopgd);
160 if (err)
161 goto out;
162 continue;
163 }
164
165 iopte = iopte_offset(iopgd, 0);
166
167 for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
168 if (!*iopte)
169 continue;
170
171 da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
172 err = dump_ioptable_entry_one(2, da, *iopgd);
173 if (err)
174 goto out;
175 }
176 }
177out:
178 spin_unlock(&obj->page_table_lock);
179
180 return p - buf;
181}
182
183static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
184 size_t count, loff_t *ppos)
185{
186 struct iommu *obj = file->private_data;
187 char *p, *buf;
188 size_t bytes;
189
190 buf = (char *)__get_free_page(GFP_KERNEL);
191 if (!buf)
192 return -ENOMEM;
193 p = buf;
194
195 p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
196 p += sprintf(p, "-----------------------------------------\n");
197
198 mutex_lock(&iommu_debug_lock);
199
200 bytes = PAGE_SIZE - (p - buf);
201 p += dump_ioptable(obj, p, bytes);
202
203 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
204
205 mutex_unlock(&iommu_debug_lock);
206 free_page((unsigned long)buf);
207
208 return bytes;
209}
210
211static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
212 size_t count, loff_t *ppos)
213{
214 struct iommu *obj = file->private_data;
215 char *p, *buf;
216 struct iovm_struct *tmp;
 217 int i = 0;
218 ssize_t bytes;
219
220 buf = (char *)__get_free_page(GFP_KERNEL);
221 if (!buf)
222 return -ENOMEM;
223 p = buf;
224
225 p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n",
226 "No", "start", "end", "size", "flags");
227 p += sprintf(p, "-------------------------------------------------\n");
228
229 mutex_lock(&iommu_debug_lock);
230
231 list_for_each_entry(tmp, &obj->mmap, list) {
232 size_t len;
233 const char *str = "%3d %08x-%08x %6x %8x\n";
234 const int maxcol = 39;
235
236 len = tmp->da_end - tmp->da_start;
237 p += snprintf(p, maxcol, str,
238 i, tmp->da_start, tmp->da_end, len, tmp->flags);
239
240 if (PAGE_SIZE - (p - buf) < maxcol)
241 break;
242 i++;
243 }
244
245 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
246
247 mutex_unlock(&iommu_debug_lock);
248 free_page((unsigned long)buf);
249
250 return bytes;
251}
252
253static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
254 size_t count, loff_t *ppos)
255{
256 struct iommu *obj = file->private_data;
257 char *p, *buf;
258 struct iovm_struct *area;
259 ssize_t bytes;
260
261 count = min_t(ssize_t, count, PAGE_SIZE);
262
263 buf = (char *)__get_free_page(GFP_KERNEL);
264 if (!buf)
265 return -ENOMEM;
266 p = buf;
267
268 mutex_lock(&iommu_debug_lock);
269
270 area = find_iovm_area(obj, (u32)ppos);
271 if (IS_ERR(area)) {
272 bytes = -EINVAL;
273 goto err_out;
274 }
275 memcpy(p, area->va, count);
276 p += count;
277
278 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
279err_out:
280 mutex_unlock(&iommu_debug_lock);
281 free_page((unsigned long)buf);
282
283 return bytes;
284}
285
286static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
287 size_t count, loff_t *ppos)
288{
289 struct iommu *obj = file->private_data;
290 struct iovm_struct *area;
291 char *p, *buf;
292
293 count = min_t(size_t, count, PAGE_SIZE);
294
295 buf = (char *)__get_free_page(GFP_KERNEL);
296 if (!buf)
297 return -ENOMEM;
298 p = buf;
299
300 mutex_lock(&iommu_debug_lock);
301
302 if (copy_from_user(p, userbuf, count)) {
303 count = -EFAULT;
304 goto err_out;
305 }
306
307 area = find_iovm_area(obj, (u32)ppos);
308 if (IS_ERR(area)) {
309 count = -EINVAL;
310 goto err_out;
311 }
312 memcpy(area->va, p, count);
313err_out:
314 mutex_unlock(&iommu_debug_lock);
315 free_page((unsigned long)buf);
316
317 return count;
318}
319
320static int debug_open_generic(struct inode *inode, struct file *file)
321{
322 file->private_data = inode->i_private;
323 return 0;
324}
325
326#define DEBUG_FOPS(name) \
327 static const struct file_operations debug_##name##_fops = { \
328 .open = debug_open_generic, \
329 .read = debug_read_##name, \
330 .write = debug_write_##name, \
331 .llseek = generic_file_llseek, \
332 };
333
334#define DEBUG_FOPS_RO(name) \
335 static const struct file_operations debug_##name##_fops = { \
336 .open = debug_open_generic, \
337 .read = debug_read_##name, \
338 .llseek = generic_file_llseek, \
339 };
340
341DEBUG_FOPS_RO(ver);
342DEBUG_FOPS_RO(regs);
343DEBUG_FOPS_RO(tlb);
344DEBUG_FOPS(pagetable);
345DEBUG_FOPS_RO(mmap);
346DEBUG_FOPS(mem);
347
348#define __DEBUG_ADD_FILE(attr, mode) \
349 { \
350 struct dentry *dent; \
351 dent = debugfs_create_file(#attr, mode, parent, \
352 obj, &debug_##attr##_fops); \
353 if (!dent) \
354 return -ENOMEM; \
355 }
356
357#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600)
358#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400)
359
360static int iommu_debug_register(struct device *dev, void *data)
361{
362 struct platform_device *pdev = to_platform_device(dev);
363 struct iommu *obj = platform_get_drvdata(pdev);
364 struct dentry *d, *parent;
365
366 if (!obj || !obj->dev)
367 return -EINVAL;
368
369 d = debugfs_create_dir(obj->name, iommu_debug_root);
370 if (!d)
371 return -ENOMEM;
372 parent = d;
373
374 d = debugfs_create_u8("nr_tlb_entries", 400, parent,
375 (u8 *)&obj->nr_tlb_entries);
376 if (!d)
377 return -ENOMEM;
378
379 DEBUG_ADD_FILE_RO(ver);
380 DEBUG_ADD_FILE_RO(regs);
381 DEBUG_ADD_FILE_RO(tlb);
382 DEBUG_ADD_FILE(pagetable);
383 DEBUG_ADD_FILE_RO(mmap);
384 DEBUG_ADD_FILE(mem);
385
386 return 0;
387}
388
389static int __init iommu_debug_init(void)
390{
391 struct dentry *d;
392 int err;
393
394 d = debugfs_create_dir("iommu", NULL);
395 if (!d)
396 return -ENOMEM;
397 iommu_debug_root = d;
398
399 err = foreach_iommu_device(d, iommu_debug_register);
400 if (err)
401 goto err_out;
402 return 0;
403
404err_out:
405 debugfs_remove_recursive(iommu_debug_root);
406 return err;
407}
408module_init(iommu_debug_init)
409
410static void __exit iommu_debugfs_exit(void)
411{
412 debugfs_remove_recursive(iommu_debug_root);
413}
414module_exit(iommu_debugfs_exit)
415
416MODULE_DESCRIPTION("omap iommu: debugfs interface");
417MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
418MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
deleted file mode 100644
index 34fc31ee908..00000000000
--- a/arch/arm/plat-omap/iommu.c
+++ /dev/null
@@ -1,1102 +0,0 @@
1/*
2 * omap iommu: tlb and pagetable primitives
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
7 * Paul Mundt and Toshihiro Kobayashi
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/interrupt.h>
18#include <linux/ioport.h>
19#include <linux/clk.h>
20#include <linux/platform_device.h>
21
22#include <asm/cacheflush.h>
23
24#include <plat/iommu.h>
25
26#include "iopgtable.h"
27
28#define for_each_iotlb_cr(obj, n, __i, cr) \
29 for (__i = 0; \
30 (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
31 __i++)
32
33/* accommodate the difference between omap1 and omap2/3 */
34static const struct iommu_functions *arch_iommu;
35
36static struct platform_driver omap_iommu_driver;
37static struct kmem_cache *iopte_cachep;
38
39/**
 40 * install_iommu_arch - Install architecture-specific iommu functions
41 * @ops: a pointer to architecture specific iommu functions
42 *
 43 * There are several kinds of iommu algorithms (tlb, pagetable) among
 44 * the omap series. This interface installs such an iommu algorithm.
45 **/
46int install_iommu_arch(const struct iommu_functions *ops)
47{
48 if (arch_iommu)
49 return -EBUSY;
50
51 arch_iommu = ops;
52 return 0;
53}
54EXPORT_SYMBOL_GPL(install_iommu_arch);
55
56/**
 57 * uninstall_iommu_arch - Uninstall architecture-specific iommu functions
58 * @ops: a pointer to architecture specific iommu functions
59 *
 60 * This interface uninstalls the iommu algorithm installed previously.
61 **/
62void uninstall_iommu_arch(const struct iommu_functions *ops)
63{
64 if (arch_iommu != ops)
65 pr_err("%s: not your arch\n", __func__);
66
67 arch_iommu = NULL;
68}
69EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
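
The two comments above describe a plug-in scheme: each OMAP generation supplies its TLB/pagetable primitives as a struct iommu_functions and registers them once at boot. A stub sketch of the registration half (the ops table is deliberately incomplete and not the real omap2 implementation):

	static const struct iommu_functions myarch_iommu_ops = {
		.version	= 2,
		/* .enable, .disable, .fault_isr, ... filled in per SoC */
	};

	static int __init myarch_iommu_arch_init(void)
	{
		/* fails with -EBUSY if another arch already registered */
		return install_iommu_arch(&myarch_iommu_ops);
	}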
70
71/**
72 * iommu_save_ctx - Save registers for pm off-mode support
73 * @obj: target iommu
74 **/
75void iommu_save_ctx(struct iommu *obj)
76{
77 arch_iommu->save_ctx(obj);
78}
79EXPORT_SYMBOL_GPL(iommu_save_ctx);
80
81/**
82 * iommu_restore_ctx - Restore registers for pm off-mode support
83 * @obj: target iommu
84 **/
85void iommu_restore_ctx(struct iommu *obj)
86{
87 arch_iommu->restore_ctx(obj);
88}
89EXPORT_SYMBOL_GPL(iommu_restore_ctx);
90
91/**
92 * iommu_arch_version - Return running iommu arch version
93 **/
94u32 iommu_arch_version(void)
95{
96 return arch_iommu->version;
97}
98EXPORT_SYMBOL_GPL(iommu_arch_version);
99
100static int iommu_enable(struct iommu *obj)
101{
102 int err;
103
104 if (!obj)
105 return -EINVAL;
106
107 if (!arch_iommu)
108 return -ENODEV;
109
110 clk_enable(obj->clk);
111
112 err = arch_iommu->enable(obj);
113
114 clk_disable(obj->clk);
115 return err;
116}
117
118static void iommu_disable(struct iommu *obj)
119{
120 if (!obj)
121 return;
122
123 clk_enable(obj->clk);
124
125 arch_iommu->disable(obj);
126
127 clk_disable(obj->clk);
128}
129
130/*
131 * TLB operations
132 */
133void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
134{
135 BUG_ON(!cr || !e);
136
137 arch_iommu->cr_to_e(cr, e);
138}
139EXPORT_SYMBOL_GPL(iotlb_cr_to_e);
140
141static inline int iotlb_cr_valid(struct cr_regs *cr)
142{
143 if (!cr)
144 return -EINVAL;
145
146 return arch_iommu->cr_valid(cr);
147}
148
149static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
150 struct iotlb_entry *e)
151{
152 if (!e)
153 return NULL;
154
155 return arch_iommu->alloc_cr(obj, e);
156}
157
158u32 iotlb_cr_to_virt(struct cr_regs *cr)
159{
160 return arch_iommu->cr_to_virt(cr);
161}
162EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);
163
164static u32 get_iopte_attr(struct iotlb_entry *e)
165{
166 return arch_iommu->get_pte_attr(e);
167}
168
169static u32 iommu_report_fault(struct iommu *obj, u32 *da)
170{
171 return arch_iommu->fault_isr(obj, da);
172}
173
174static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
175{
176 u32 val;
177
178 val = iommu_read_reg(obj, MMU_LOCK);
179
180 l->base = MMU_LOCK_BASE(val);
181 l->vict = MMU_LOCK_VICT(val);
182
183}
184
185static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
186{
187 u32 val;
188
189 val = (l->base << MMU_LOCK_BASE_SHIFT);
190 val |= (l->vict << MMU_LOCK_VICT_SHIFT);
191
192 iommu_write_reg(obj, val, MMU_LOCK);
193}
194
195static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
196{
197 arch_iommu->tlb_read_cr(obj, cr);
198}
199
200static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
201{
202 arch_iommu->tlb_load_cr(obj, cr);
203
204 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
205 iommu_write_reg(obj, 1, MMU_LD_TLB);
206}
207
208/**
209 * iotlb_dump_cr - Dump an iommu tlb entry into buf
210 * @obj: target iommu
211 * @cr: contents of cam and ram register
212 * @buf: output buffer
213 **/
214static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
215 char *buf)
216{
217 BUG_ON(!cr || !buf);
218
219 return arch_iommu->dump_cr(obj, cr, buf);
220}
221
222/* only used in iotlb iteration for-loop */
223static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
224{
225 struct cr_regs cr;
226 struct iotlb_lock l;
227
228 iotlb_lock_get(obj, &l);
229 l.vict = n;
230 iotlb_lock_set(obj, &l);
231 iotlb_read_cr(obj, &cr);
232
233 return cr;
234}
235
236/**
237 * load_iotlb_entry - Set an iommu tlb entry
238 * @obj: target iommu
239 * @e: an iommu tlb entry info
240 **/
241int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
242{
243 int err = 0;
244 struct iotlb_lock l;
245 struct cr_regs *cr;
246
247 if (!obj || !obj->nr_tlb_entries || !e)
248 return -EINVAL;
249
250 clk_enable(obj->clk);
251
252 iotlb_lock_get(obj, &l);
253 if (l.base == obj->nr_tlb_entries) {
254 dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
255 err = -EBUSY;
256 goto out;
257 }
258 if (!e->prsvd) {
259 int i;
260 struct cr_regs tmp;
261
262 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
263 if (!iotlb_cr_valid(&tmp))
264 break;
265
266 if (i == obj->nr_tlb_entries) {
267 dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
268 err = -EBUSY;
269 goto out;
270 }
271
272 iotlb_lock_get(obj, &l);
273 } else {
274 l.vict = l.base;
275 iotlb_lock_set(obj, &l);
276 }
277
278 cr = iotlb_alloc_cr(obj, e);
279 if (IS_ERR(cr)) {
280 clk_disable(obj->clk);
281 return PTR_ERR(cr);
282 }
283
284 iotlb_load_cr(obj, cr);
285 kfree(cr);
286
287 if (e->prsvd)
288 l.base++;
289 /* increment victim for next tlb load */
290 if (++l.vict == obj->nr_tlb_entries)
291 l.vict = l.base;
292 iotlb_lock_set(obj, &l);
293out:
294 clk_disable(obj->clk);
295 return err;
296}
297EXPORT_SYMBOL_GPL(load_iotlb_entry);
298
299/**
300 * flush_iotlb_page - Clear an iommu tlb entry
301 * @obj: target iommu
302 * @da: iommu device virtual address
303 *
304 * Clear an iommu tlb entry which includes 'da' address.
305 **/
306void flush_iotlb_page(struct iommu *obj, u32 da)
307{
308 int i;
309 struct cr_regs cr;
310
311 clk_enable(obj->clk);
312
313 for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
314 u32 start;
315 size_t bytes;
316
317 if (!iotlb_cr_valid(&cr))
318 continue;
319
320 start = iotlb_cr_to_virt(&cr);
321 bytes = iopgsz_to_bytes(cr.cam & 3);
322
323 if ((start <= da) && (da < start + bytes)) {
324 dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
325 __func__, start, da, bytes);
326 iotlb_load_cr(obj, &cr);
327 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
328 }
329 }
330 clk_disable(obj->clk);
331
332 if (i == obj->nr_tlb_entries)
333 dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
334}
335EXPORT_SYMBOL_GPL(flush_iotlb_page);
336
337/**
 338 * flush_iotlb_range - Clear iommu tlb entries
 339 * @obj: target iommu
 340 * @start: iommu device virtual address (start)
 341 * @end: iommu device virtual address (end)
 342 *
 343 * Clear the iommu tlb entries within the [start, end) range.
344 **/
345void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
346{
347 u32 da = start;
348
349 while (da < end) {
350 flush_iotlb_page(obj, da);
351 /* FIXME: Optimize for multiple page size */
352 da += IOPTE_SIZE;
353 }
354}
355EXPORT_SYMBOL_GPL(flush_iotlb_range);
356
357/**
358 * flush_iotlb_all - Clear all iommu tlb entries
359 * @obj: target iommu
360 **/
361void flush_iotlb_all(struct iommu *obj)
362{
363 struct iotlb_lock l;
364
365 clk_enable(obj->clk);
366
367 l.base = 0;
368 l.vict = 0;
369 iotlb_lock_set(obj, &l);
370
371 iommu_write_reg(obj, 1, MMU_GFLUSH);
372
373 clk_disable(obj->clk);
374}
375EXPORT_SYMBOL_GPL(flush_iotlb_all);
376
377/**
378 * iommu_set_twl - enable/disable table walking logic
379 * @obj: target iommu
380 * @on: enable/disable
381 *
382 * Function used to enable/disable TWL. If one wants to work
383 * exclusively with locked TLB entries and receive notifications
384 * for TLB miss then call this function to disable TWL.
385 */
386void iommu_set_twl(struct iommu *obj, bool on)
387{
388 clk_enable(obj->clk);
389 arch_iommu->set_twl(obj, on);
390 clk_disable(obj->clk);
391}
392EXPORT_SYMBOL_GPL(iommu_set_twl);
393
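
Per the comment above, a client that manages the TLB entirely by hand disables the table-walking logic and services misses from its own ISR. A hedged sketch of that combination (the ISR body is illustrative; note that iommu_set_isr() only succeeds while the iommu has no users):

	static int my_tlb_miss(struct iommu *obj, u32 da, u32 errs, void *priv)
	{
		/* load a locked entry covering 'da'; return 0 once serviced */
		return 0;
	}

	static int my_setup(struct iommu *obj)
	{
		int err;

		err = iommu_set_isr(obj->name, my_tlb_miss, NULL);
		if (err)
			return err;

		iommu_set_twl(obj, false);	/* misses now reach the ISR */
		return 0;
	}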
394#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
395
396ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
397{
398 if (!obj || !buf)
399 return -EINVAL;
400
401 clk_enable(obj->clk);
402
403 bytes = arch_iommu->dump_ctx(obj, buf, bytes);
404
405 clk_disable(obj->clk);
406
407 return bytes;
408}
409EXPORT_SYMBOL_GPL(iommu_dump_ctx);
410
411static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
412{
413 int i;
414 struct iotlb_lock saved;
415 struct cr_regs tmp;
416 struct cr_regs *p = crs;
417
418 clk_enable(obj->clk);
419 iotlb_lock_get(obj, &saved);
420
421 for_each_iotlb_cr(obj, num, i, tmp) {
422 if (!iotlb_cr_valid(&tmp))
423 continue;
424 *p++ = tmp;
425 }
426
427 iotlb_lock_set(obj, &saved);
428 clk_disable(obj->clk);
429
430 return p - crs;
431}
432
433/**
434 * dump_tlb_entries - dump cr arrays to given buffer
435 * @obj: target iommu
436 * @buf: output buffer
437 **/
438size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
439{
440 int i, num;
441 struct cr_regs *cr;
442 char *p = buf;
443
444 num = bytes / sizeof(*cr);
445 num = min(obj->nr_tlb_entries, num);
446
447 cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
448 if (!cr)
449 return 0;
450
451 num = __dump_tlb_entries(obj, cr, num);
452 for (i = 0; i < num; i++)
453 p += iotlb_dump_cr(obj, cr + i, p);
454 kfree(cr);
455
456 return p - buf;
457}
458EXPORT_SYMBOL_GPL(dump_tlb_entries);
459
460int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
461{
462 return driver_for_each_device(&omap_iommu_driver.driver,
463 NULL, data, fn);
464}
465EXPORT_SYMBOL_GPL(foreach_iommu_device);
466
467#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
468
469/*
470 * H/W pagetable operations
471 */
472static void flush_iopgd_range(u32 *first, u32 *last)
473{
474 /* FIXME: L2 cache should be taken care of if it exists */
475 do {
476 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
477 : : "r" (first));
478 first += L1_CACHE_BYTES / sizeof(*first);
479 } while (first <= last);
480}
481
482static void flush_iopte_range(u32 *first, u32 *last)
483{
484 /* FIXME: L2 cache should be taken care of if it exists */
485 do {
486 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
487 : : "r" (first));
488 first += L1_CACHE_BYTES / sizeof(*first);
489 } while (first <= last);
490}
491
492static void iopte_free(u32 *iopte)
493{
494 /* Note: freed iopte's must be clean ready for re-use */
495 kmem_cache_free(iopte_cachep, iopte);
496}
497
498static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
499{
500 u32 *iopte;
501
 502 /* a table already exists */
503 if (*iopgd)
504 goto pte_ready;
505
506 /*
507 * do the allocation outside the page table lock
508 */
509 spin_unlock(&obj->page_table_lock);
510 iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
511 spin_lock(&obj->page_table_lock);
512
513 if (!*iopgd) {
514 if (!iopte)
515 return ERR_PTR(-ENOMEM);
516
517 *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
518 flush_iopgd_range(iopgd, iopgd);
519
520 dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
521 } else {
 522 /* We raced, free the redundant table */
523 iopte_free(iopte);
524 }
525
526pte_ready:
527 iopte = iopte_offset(iopgd, da);
528
529 dev_vdbg(obj->dev,
530 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
531 __func__, da, iopgd, *iopgd, iopte, *iopte);
532
533 return iopte;
534}
535
536static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
537{
538 u32 *iopgd = iopgd_offset(obj, da);
539
540 if ((da | pa) & ~IOSECTION_MASK) {
 541 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
542 __func__, da, pa, IOSECTION_SIZE);
543 return -EINVAL;
544 }
545
546 *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
547 flush_iopgd_range(iopgd, iopgd);
548 return 0;
549}
550
551static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
552{
553 u32 *iopgd = iopgd_offset(obj, da);
554 int i;
555
556 if ((da | pa) & ~IOSUPER_MASK) {
 557 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
558 __func__, da, pa, IOSUPER_SIZE);
559 return -EINVAL;
560 }
561
562 for (i = 0; i < 16; i++)
563 *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
564 flush_iopgd_range(iopgd, iopgd + 15);
565 return 0;
566}
567
568static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
569{
570 u32 *iopgd = iopgd_offset(obj, da);
571 u32 *iopte = iopte_alloc(obj, iopgd, da);
572
573 if (IS_ERR(iopte))
574 return PTR_ERR(iopte);
575
576 *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
577 flush_iopte_range(iopte, iopte);
578
579 dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
580 __func__, da, pa, iopte, *iopte);
581
582 return 0;
583}
584
585static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
586{
587 u32 *iopgd = iopgd_offset(obj, da);
588 u32 *iopte = iopte_alloc(obj, iopgd, da);
589 int i;
590
591 if ((da | pa) & ~IOLARGE_MASK) {
 592 dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
593 __func__, da, pa, IOLARGE_SIZE);
594 return -EINVAL;
595 }
596
597 if (IS_ERR(iopte))
598 return PTR_ERR(iopte);
599
600 for (i = 0; i < 16; i++)
601 *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
602 flush_iopte_range(iopte, iopte + 15);
603 return 0;
604}
605
606static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
607{
608 int (*fn)(struct iommu *, u32, u32, u32);
609 u32 prot;
610 int err;
611
612 if (!obj || !e)
613 return -EINVAL;
614
615 switch (e->pgsz) {
616 case MMU_CAM_PGSZ_16M:
617 fn = iopgd_alloc_super;
618 break;
619 case MMU_CAM_PGSZ_1M:
620 fn = iopgd_alloc_section;
621 break;
622 case MMU_CAM_PGSZ_64K:
623 fn = iopte_alloc_large;
624 break;
625 case MMU_CAM_PGSZ_4K:
626 fn = iopte_alloc_page;
627 break;
628 default:
629 fn = NULL;
630 BUG();
631 break;
632 }
633
634 prot = get_iopte_attr(e);
635
636 spin_lock(&obj->page_table_lock);
637 err = fn(obj, e->da, e->pa, prot);
638 spin_unlock(&obj->page_table_lock);
639
640 return err;
641}
642
643/**
644 * iopgtable_store_entry - Make an iommu pte entry
645 * @obj: target iommu
646 * @e: an iommu tlb entry info
647 **/
648int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
649{
650 int err;
651
652 flush_iotlb_page(obj, e->da);
653 err = iopgtable_store_entry_core(obj, e);
654#ifdef PREFETCH_IOTLB
655 if (!err)
656 load_iotlb_entry(obj, e);
657#endif
658 return err;
659}
660EXPORT_SYMBOL_GPL(iopgtable_store_entry);
661
662/**
663 * iopgtable_lookup_entry - Lookup an iommu pte entry
664 * @obj: target iommu
665 * @da: iommu device virtual address
666 * @ppgd: iommu pgd entry pointer to be returned
667 * @ppte: iommu pte entry pointer to be returned
668 **/
669void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
670{
671 u32 *iopgd, *iopte = NULL;
672
673 iopgd = iopgd_offset(obj, da);
674 if (!*iopgd)
675 goto out;
676
677 if (iopgd_is_table(*iopgd))
678 iopte = iopte_offset(iopgd, da);
679out:
680 *ppgd = iopgd;
681 *ppte = iopte;
682}
683EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
684
685static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
686{
687 size_t bytes;
688 u32 *iopgd = iopgd_offset(obj, da);
689 int nent = 1;
690
691 if (!*iopgd)
692 return 0;
693
694 if (iopgd_is_table(*iopgd)) {
695 int i;
696 u32 *iopte = iopte_offset(iopgd, da);
697
698 bytes = IOPTE_SIZE;
699 if (*iopte & IOPTE_LARGE) {
700 nent *= 16;
701 /* rewind to the 1st entry */
702 iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
703 }
704 bytes *= nent;
705 memset(iopte, 0, nent * sizeof(*iopte));
 706 flush_iopte_range(iopte, iopte + nent - 1); /* last entry, not byte offset */
707
708 /*
709 * do table walk to check if this table is necessary or not
710 */
711 iopte = iopte_offset(iopgd, 0);
712 for (i = 0; i < PTRS_PER_IOPTE; i++)
713 if (iopte[i])
714 goto out;
715
716 iopte_free(iopte);
717 nent = 1; /* for the next L1 entry */
718 } else {
719 bytes = IOPGD_SIZE;
720 if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
721 nent *= 16;
722 /* rewind to the 1st entry */
723 iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
724 }
725 bytes *= nent;
726 }
727 memset(iopgd, 0, nent * sizeof(*iopgd));
 728 flush_iopgd_range(iopgd, iopgd + nent - 1); /* last entry, not byte offset */
729out:
730 return bytes;
731}
732
733/**
734 * iopgtable_clear_entry - Remove an iommu pte entry
735 * @obj: target iommu
736 * @da: iommu device virtual address
737 **/
738size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
739{
740 size_t bytes;
741
742 spin_lock(&obj->page_table_lock);
743
744 bytes = iopgtable_clear_entry_core(obj, da);
745 flush_iotlb_page(obj, da);
746
747 spin_unlock(&obj->page_table_lock);
748
749 return bytes;
750}
751EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
752
753static void iopgtable_clear_entry_all(struct iommu *obj)
754{
755 int i;
756
757 spin_lock(&obj->page_table_lock);
758
759 for (i = 0; i < PTRS_PER_IOPGD; i++) {
760 u32 da;
761 u32 *iopgd;
762
763 da = i << IOPGD_SHIFT;
764 iopgd = iopgd_offset(obj, da);
765
766 if (!*iopgd)
767 continue;
768
769 if (iopgd_is_table(*iopgd))
770 iopte_free(iopte_offset(iopgd, 0));
771
772 *iopgd = 0;
773 flush_iopgd_range(iopgd, iopgd);
774 }
775
776 flush_iotlb_all(obj);
777
778 spin_unlock(&obj->page_table_lock);
779}
780
781/*
782 * Device IOMMU generic operations
783 */
784static irqreturn_t iommu_fault_handler(int irq, void *data)
785{
786 u32 da, errs;
787 u32 *iopgd, *iopte;
788 struct iommu *obj = data;
789
790 if (!obj->refcount)
791 return IRQ_NONE;
792
793 clk_enable(obj->clk);
794 errs = iommu_report_fault(obj, &da);
795 clk_disable(obj->clk);
796 if (errs == 0)
797 return IRQ_HANDLED;
798
799 /* Fault callback or TLB/PTE Dynamic loading */
800 if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
801 return IRQ_HANDLED;
802
803 iommu_disable(obj);
804
805 iopgd = iopgd_offset(obj, da);
806
807 if (!iopgd_is_table(*iopgd)) {
808 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
 809 "*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
810 return IRQ_NONE;
811 }
812
813 iopte = iopte_offset(iopgd, da);
814
815 dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
816 "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
817 iopte, *iopte);
818
819 return IRQ_NONE;
820}
821
822static int device_match_by_alias(struct device *dev, void *data)
823{
824 struct iommu *obj = to_iommu(dev);
825 const char *name = data;
826
827 pr_debug("%s: %s %s\n", __func__, obj->name, name);
828
829 return strcmp(obj->name, name) == 0;
830}
831
832/**
833 * iommu_set_da_range - Set a valid device address range
834 * @obj: target iommu
 835 * @start: Start of valid range
 836 * @end: End of valid range
837 **/
838int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
839{
840
841 if (!obj)
842 return -EFAULT;
843
844 if (end < start || !PAGE_ALIGN(start | end))
845 return -EINVAL;
846
847 obj->da_start = start;
848 obj->da_end = end;
849
850 return 0;
851}
852EXPORT_SYMBOL_GPL(iommu_set_da_range);
853
854/**
855 * iommu_get - Get iommu handler
856 * @name: target iommu name
857 **/
858struct iommu *iommu_get(const char *name)
859{
860 int err = -ENOMEM;
861 struct device *dev;
862 struct iommu *obj;
863
864 dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
865 device_match_by_alias);
866 if (!dev)
867 return ERR_PTR(-ENODEV);
868
869 obj = to_iommu(dev);
870
871 mutex_lock(&obj->iommu_lock);
872
873 if (obj->refcount++ == 0) {
874 err = iommu_enable(obj);
875 if (err)
876 goto err_enable;
877 flush_iotlb_all(obj);
878 }
879
880 if (!try_module_get(obj->owner))
881 goto err_module;
882
883 mutex_unlock(&obj->iommu_lock);
884
885 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
886 return obj;
887
888err_module:
889 if (obj->refcount == 1)
890 iommu_disable(obj);
891err_enable:
892 obj->refcount--;
893 mutex_unlock(&obj->iommu_lock);
894 return ERR_PTR(err);
895}
896EXPORT_SYMBOL_GPL(iommu_get);
897
898/**
899 * iommu_put - Put back iommu handler
900 * @obj: target iommu
901 **/
902void iommu_put(struct iommu *obj)
903{
904 if (!obj || IS_ERR(obj))
905 return;
906
907 mutex_lock(&obj->iommu_lock);
908
909 if (--obj->refcount == 0)
910 iommu_disable(obj);
911
912 module_put(obj->owner);
913
914 mutex_unlock(&obj->iommu_lock);
915
916 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
917}
918EXPORT_SYMBOL_GPL(iommu_put);
919
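
The handle API above is reference-counted: the first iommu_get() enables and flushes the MMU, the last iommu_put() disables it. A minimal sketch of the by-name attach pattern clients of this (now removed) interface followed; the "isp" alias is just an example:

	static int my_client_attach(void)
	{
		struct iommu *obj;

		obj = iommu_get("isp");		/* enables MMU for first user */
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		/* ... program mappings via iopgtable_store_entry() ... */

		iommu_put(obj);			/* disables MMU with last user */
		return 0;
	}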
920int iommu_set_isr(const char *name,
921 int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
922 void *priv),
923 void *isr_priv)
924{
925 struct device *dev;
926 struct iommu *obj;
927
928 dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
929 device_match_by_alias);
930 if (!dev)
931 return -ENODEV;
932
933 obj = to_iommu(dev);
934 mutex_lock(&obj->iommu_lock);
935 if (obj->refcount != 0) {
936 mutex_unlock(&obj->iommu_lock);
937 return -EBUSY;
938 }
939 obj->isr = isr;
940 obj->isr_priv = isr_priv;
941 mutex_unlock(&obj->iommu_lock);
942
943 return 0;
944}
945EXPORT_SYMBOL_GPL(iommu_set_isr);
946
947/*
948 * OMAP Device MMU(IOMMU) detection
949 */
950static int __devinit omap_iommu_probe(struct platform_device *pdev)
951{
952 int err = -ENODEV;
953 void *p;
954 int irq;
955 struct iommu *obj;
956 struct resource *res;
957 struct iommu_platform_data *pdata = pdev->dev.platform_data;
958
959 if (pdev->num_resources != 2)
960 return -EINVAL;
961
962 obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
963 if (!obj)
964 return -ENOMEM;
965
966 obj->clk = clk_get(&pdev->dev, pdata->clk_name);
967 if (IS_ERR(obj->clk))
968 goto err_clk;
969
970 obj->nr_tlb_entries = pdata->nr_tlb_entries;
971 obj->name = pdata->name;
972 obj->dev = &pdev->dev;
973 obj->ctx = (void *)obj + sizeof(*obj);
974 obj->da_start = pdata->da_start;
975 obj->da_end = pdata->da_end;
976
977 mutex_init(&obj->iommu_lock);
978 mutex_init(&obj->mmap_lock);
979 spin_lock_init(&obj->page_table_lock);
980 INIT_LIST_HEAD(&obj->mmap);
981
982 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
983 if (!res) {
984 err = -ENODEV;
985 goto err_mem;
986 }
987
988 res = request_mem_region(res->start, resource_size(res),
989 dev_name(&pdev->dev));
990 if (!res) {
991 err = -EIO;
992 goto err_mem;
993 }
994
995 obj->regbase = ioremap(res->start, resource_size(res));
996 if (!obj->regbase) {
997 err = -ENOMEM;
998 goto err_ioremap;
999 }
1000
1001 irq = platform_get_irq(pdev, 0);
1002 if (irq < 0) {
1003 err = -ENODEV;
1004 goto err_irq;
1005 }
1006 err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
1007 dev_name(&pdev->dev), obj);
1008 if (err < 0)
1009 goto err_irq;
1010 platform_set_drvdata(pdev, obj);
1011
1012 p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
1013 if (!p) {
1014 err = -ENOMEM;
1015 goto err_pgd;
1016 }
1017 memset(p, 0, IOPGD_TABLE_SIZE);
1018 clean_dcache_area(p, IOPGD_TABLE_SIZE);
1019 obj->iopgd = p;
1020
1021 BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));
1022
1023 dev_info(&pdev->dev, "%s registered\n", obj->name);
1024 return 0;
1025
1026err_pgd:
1027 free_irq(irq, obj);
1028err_irq:
1029 iounmap(obj->regbase);
1030err_ioremap:
1031 release_mem_region(res->start, resource_size(res));
1032err_mem:
1033 clk_put(obj->clk);
1034err_clk:
1035 kfree(obj);
1036 return err;
1037}
1038
1039static int __devexit omap_iommu_remove(struct platform_device *pdev)
1040{
1041 int irq;
1042 struct resource *res;
1043 struct iommu *obj = platform_get_drvdata(pdev);
1044
1045 platform_set_drvdata(pdev, NULL);
1046
1047 iopgtable_clear_entry_all(obj);
1048 free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));
1049
1050 irq = platform_get_irq(pdev, 0);
1051 free_irq(irq, obj);
1052 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1053 release_mem_region(res->start, resource_size(res));
1054 iounmap(obj->regbase);
1055
1056 clk_put(obj->clk);
1057 dev_info(&pdev->dev, "%s removed\n", obj->name);
1058 kfree(obj);
1059 return 0;
1060}
1061
1062static struct platform_driver omap_iommu_driver = {
1063 .probe = omap_iommu_probe,
1064 .remove = __devexit_p(omap_iommu_remove),
1065 .driver = {
1066 .name = "omap-iommu",
1067 },
1068};
1069
1070static void iopte_cachep_ctor(void *iopte)
1071{
1072 clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
1073}
1074
1075static int __init omap_iommu_init(void)
1076{
1077 struct kmem_cache *p;
1078 const unsigned long flags = SLAB_HWCACHE_ALIGN;
1079	size_t align = 1 << 10; /* L2 pagetable alignment */
1080
1081 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1082 iopte_cachep_ctor);
1083 if (!p)
1084 return -ENOMEM;
1085 iopte_cachep = p;
1086
1087 return platform_driver_register(&omap_iommu_driver);
1088}
1089module_init(omap_iommu_init);
1090
1091static void __exit omap_iommu_exit(void)
1092{
1093 kmem_cache_destroy(iopte_cachep);
1094
1095 platform_driver_unregister(&omap_iommu_driver);
1096}
1097module_exit(omap_iommu_exit);
1098
1099MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
1100MODULE_ALIAS("platform:omap-iommu");
1101MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
1102MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
deleted file mode 100644
index 83a37c54342..00000000000
--- a/arch/arm/plat-omap/iovmm.c
+++ /dev/null
@@ -1,907 +0,0 @@
1/*
2 * omap iommu: simple virtual address space management
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/slab.h>
15#include <linux/vmalloc.h>
16#include <linux/device.h>
17#include <linux/scatterlist.h>
18
19#include <asm/cacheflush.h>
20#include <asm/mach/map.h>
21
22#include <plat/iommu.h>
23#include <plat/iovmm.h>
24
25#include "iopgtable.h"
26
27/*
28 * A device driver needs to create address mappings between:
29 *
30 * - iommu/device address
31 * - physical address
32 * - mpu virtual address
33 *
34 * There are 4 possible patterns for them:
35 *
36 * |iova/ mapping iommu_ page
37 * | da pa va (d)-(p)-(v) function type
38 * ---------------------------------------------------------------------------
39 * 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s
40 * 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s
41 * 3 | c d c 1 - n - 1 _vmap() / _vunmap() s
42 * 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n*
43 *
44 *
45 * 'iova': device iommu virtual address
46 * 'da': alias of 'iova'
47 * 'pa': physical address
48 * 'va': mpu virtual address
49 *
50 * 'c': contiguous memory area
51 * 'd': discontiguous memory area
52 * 'a': anonymous memory allocation
53 * '()': optional feature
54 *
55 * 'n': a normal page(4KB) size is used.
56 * 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used.
57 *
58 * '*': not yet, but feasible.
59 */
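/*
 * Editor's sketch (not in the original source): pattern 4 above, for
 * instance, corresponds to the call sequence below, where 'obj' came
 * from iommu_get() and da == 0 lets the allocator pick the device
 * address (IOVMF_DA_FIXED unset):
 *
 *	u32 da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	... the device uses 'da'; the MPU uses da_to_va(obj, da) ...
 *	iommu_vfree(obj, da);
 */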
60
61static struct kmem_cache *iovm_area_cachep;
62
63/* return total bytes of sg buffers */
64static size_t sgtable_len(const struct sg_table *sgt)
65{
66 unsigned int i, total = 0;
67 struct scatterlist *sg;
68
69 if (!sgt)
70 return 0;
71
72 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
73 size_t bytes;
74
75 bytes = sg_dma_len(sg);
76
77 if (!iopgsz_ok(bytes)) {
78			pr_err("%s: sg[%d] not iommu pagesize(%zx)\n",
79			       __func__, i, bytes);
80 return 0;
81 }
82
83 total += bytes;
84 }
85
86 return total;
87}
88#define sgtable_ok(x) (!!sgtable_len(x))
89
90static unsigned max_alignment(u32 addr)
91{
92 int i;
93 unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
94 for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
95 ;
96 return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
97}
98
99/*
100 * calculate the optimal number of sg elements from total bytes based on
101 * iommu superpages
102 */
103static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
104{
105 unsigned nr_entries = 0, ent_sz;
106
107 if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
108		pr_err("%s: wrong size %zx\n", __func__, bytes);
109 return 0;
110 }
111
112 while (bytes) {
113 ent_sz = max_alignment(da | pa);
114 ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
115 nr_entries++;
116 da += ent_sz;
117 pa += ent_sz;
118 bytes -= ent_sz;
119 }
120
121 return nr_entries;
122}
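/*
 * Editor's worked example (assumed inputs): for da == pa == 0x80000000
 * and bytes == SZ_16M + SZ_1M, the loop above emits one 16MB entry
 * (max_alignment() allows a supersection and iopgsz_max() caps it at
 * the bytes remaining) and then one 1MB entry, so sgtable_nents()
 * returns 2 rather than 4352 4KB entries.
 */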
123
124/* allocate and initialize sg_table header (a kind of 'superblock') */
125static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
126 u32 da, u32 pa)
127{
128 unsigned int nr_entries;
129 int err;
130 struct sg_table *sgt;
131
132 if (!bytes)
133 return ERR_PTR(-EINVAL);
134
135 if (!IS_ALIGNED(bytes, PAGE_SIZE))
136 return ERR_PTR(-EINVAL);
137
138 if (flags & IOVMF_LINEAR) {
139 nr_entries = sgtable_nents(bytes, da, pa);
140 if (!nr_entries)
141 return ERR_PTR(-EINVAL);
142 } else
143 nr_entries = bytes / PAGE_SIZE;
144
145 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
146 if (!sgt)
147 return ERR_PTR(-ENOMEM);
148
149 err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
150 if (err) {
151 kfree(sgt);
152 return ERR_PTR(err);
153 }
154
155 pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
156
157 return sgt;
158}
159
160/* free sg_table header (a kind of superblock) */
161static void sgtable_free(struct sg_table *sgt)
162{
163 if (!sgt)
164 return;
165
166 sg_free_table(sgt);
167 kfree(sgt);
168
169 pr_debug("%s: sgt:%p\n", __func__, sgt);
170}
171
172/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
173static void *vmap_sg(const struct sg_table *sgt)
174{
175 u32 va;
176 size_t total;
177 unsigned int i;
178 struct scatterlist *sg;
179 struct vm_struct *new;
180 const struct mem_type *mtype;
181
182 mtype = get_mem_type(MT_DEVICE);
183 if (!mtype)
184 return ERR_PTR(-EINVAL);
185
186 total = sgtable_len(sgt);
187 if (!total)
188 return ERR_PTR(-EINVAL);
189
190 new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
191 if (!new)
192 return ERR_PTR(-ENOMEM);
193 va = (u32)new->addr;
194
195 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
196 size_t bytes;
197 u32 pa;
198 int err;
199
200 pa = sg_phys(sg);
201 bytes = sg_dma_len(sg);
202
203 BUG_ON(bytes != PAGE_SIZE);
204
205 err = ioremap_page(va, pa, mtype);
206 if (err)
207 goto err_out;
208
209 va += bytes;
210 }
211
212 flush_cache_vmap((unsigned long)new->addr,
213 (unsigned long)(new->addr + total));
214 return new->addr;
215
216err_out:
217 WARN_ON(1); /* FIXME: cleanup some mpu mappings */
218 vunmap(new->addr);
219 return ERR_PTR(-EAGAIN);
220}
221
222static inline void vunmap_sg(const void *va)
223{
224 vunmap(va);
225}
226
227static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
228{
229 struct iovm_struct *tmp;
230
231 list_for_each_entry(tmp, &obj->mmap, list) {
232 if ((da >= tmp->da_start) && (da < tmp->da_end)) {
233 size_t len;
234
235 len = tmp->da_end - tmp->da_start;
236
237 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
238 __func__, tmp->da_start, da, tmp->da_end, len,
239 tmp->flags);
240
241 return tmp;
242 }
243 }
244
245 return NULL;
246}
247
248/**
249 * find_iovm_area - find iovma which includes @da
250 * @da: iommu device virtual address
251 *
252 * Find the existing iovma starting at @da
253 */
254struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
255{
256 struct iovm_struct *area;
257
258 mutex_lock(&obj->mmap_lock);
259 area = __find_iovm_area(obj, da);
260 mutex_unlock(&obj->mmap_lock);
261
262 return area;
263}
264EXPORT_SYMBOL_GPL(find_iovm_area);
265
266/*
267 * This finds the hole (area) which fits the requested address and len
268 * in the iovma mmap list, and returns the newly allocated iovma.
269 */
270static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
271 size_t bytes, u32 flags)
272{
273 struct iovm_struct *new, *tmp;
274 u32 start, prev_end, alignment;
275
276 if (!obj || !bytes)
277 return ERR_PTR(-EINVAL);
278
279 start = da;
280 alignment = PAGE_SIZE;
281
282 if (~flags & IOVMF_DA_FIXED) {
283 /* Don't map address 0 */
284 start = obj->da_start ? obj->da_start : alignment;
285
286 if (flags & IOVMF_LINEAR)
287 alignment = iopgsz_max(bytes);
288 start = roundup(start, alignment);
289 } else if (start < obj->da_start || start > obj->da_end ||
290 obj->da_end - start < bytes) {
291 return ERR_PTR(-EINVAL);
292 }
293
294 tmp = NULL;
295 if (list_empty(&obj->mmap))
296 goto found;
297
298 prev_end = 0;
299 list_for_each_entry(tmp, &obj->mmap, list) {
300
301 if (prev_end > start)
302 break;
303
304 if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
305 goto found;
306
307 if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
308 start = roundup(tmp->da_end + 1, alignment);
309
310 prev_end = tmp->da_end;
311 }
312
313 if ((start >= prev_end) && (obj->da_end - start >= bytes))
314 goto found;
315
316 dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
317 __func__, da, bytes, flags);
318
319 return ERR_PTR(-EINVAL);
320
321found:
322 new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
323 if (!new)
324 return ERR_PTR(-ENOMEM);
325
326 new->iommu = obj;
327 new->da_start = start;
328 new->da_end = start + bytes;
329 new->flags = flags;
330
331 /*
332 * keep ascending order of iovmas
333 */
334 if (tmp)
335 list_add_tail(&new->list, &tmp->list);
336 else
337 list_add(&new->list, &obj->mmap);
338
339 dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
340 __func__, new->da_start, start, new->da_end, bytes, flags);
341
342 return new;
343}
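/*
 * Editor's note (behavioural sketch of the code above): with an empty
 * mmap list and IOVMF_DA_FIXED unset, the first allocation lands at
 * da_start, or at PAGE_SIZE when da_start is 0 so that device address
 * 0 is never handed out; an IOVMF_LINEAR request is further rounded up
 * to the largest iommu page size that fits, e.g. a 1MB request starts
 * on a 1MB boundary.
 */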
344
345static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
346{
347 size_t bytes;
348
349 BUG_ON(!obj || !area);
350
351 bytes = area->da_end - area->da_start;
352
353 dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
354 __func__, area->da_start, area->da_end, bytes, area->flags);
355
356 list_del(&area->list);
357 kmem_cache_free(iovm_area_cachep, area);
358}
359
360/**
361 * da_to_va - convert (d) to (v)
362 * @obj: objective iommu
363 * @da: iommu device virtual address
364 * @va: mpu virtual address
365 *
366 * Returns mpu virtual addr which corresponds to a given device virtual addr
367 */
368void *da_to_va(struct iommu *obj, u32 da)
369{
370 void *va = NULL;
371 struct iovm_struct *area;
372
373 mutex_lock(&obj->mmap_lock);
374
375 area = __find_iovm_area(obj, da);
376 if (!area) {
377 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
378 goto out;
379 }
380 va = area->va;
381out:
382 mutex_unlock(&obj->mmap_lock);
383
384 return va;
385}
386EXPORT_SYMBOL_GPL(da_to_va);
387
388static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
389{
390 unsigned int i;
391 struct scatterlist *sg;
392 void *va = _va;
393 void *va_end;
394
395 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
396 struct page *pg;
397 const size_t bytes = PAGE_SIZE;
398
399 /*
400 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
401 */
402 pg = vmalloc_to_page(va);
403 BUG_ON(!pg);
404 sg_set_page(sg, pg, bytes, 0);
405
406 va += bytes;
407 }
408
409 va_end = _va + PAGE_SIZE * i;
410}
411
412static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
413{
414 /*
415	 * Actually this is not necessary at all; it just exists for
416	 * consistency and code readability.
417 */
418 BUG_ON(!sgt);
419}
420
421static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
422 size_t len)
423{
424 unsigned int i;
425 struct scatterlist *sg;
426 void *va;
427
428 va = phys_to_virt(pa);
429
430 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
431 unsigned bytes;
432
433 bytes = max_alignment(da | pa);
434 bytes = min_t(unsigned, bytes, iopgsz_max(len));
435
436 BUG_ON(!iopgsz_ok(bytes));
437
438 sg_set_buf(sg, phys_to_virt(pa), bytes);
439 /*
440		 * 'pa' is continuous (linear).
441 */
442 pa += bytes;
443 da += bytes;
444 len -= bytes;
445 }
446 BUG_ON(len);
447}
448
449static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
450{
451 /*
452	 * Actually this is not necessary at all; it just exists for
453	 * consistency and code readability.
454 */
455 BUG_ON(!sgt);
456}
457
458/* create 'da' <-> 'pa' mapping from 'sgt' */
459static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
460 const struct sg_table *sgt, u32 flags)
461{
462 int err;
463 unsigned int i, j;
464 struct scatterlist *sg;
465 u32 da = new->da_start;
466
467 if (!obj || !sgt)
468 return -EINVAL;
469
470 BUG_ON(!sgtable_ok(sgt));
471
472 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
473 u32 pa;
474 int pgsz;
475 size_t bytes;
476 struct iotlb_entry e;
477
478 pa = sg_phys(sg);
479 bytes = sg_dma_len(sg);
480
481 flags &= ~IOVMF_PGSZ_MASK;
482 pgsz = bytes_to_iopgsz(bytes);
483 if (pgsz < 0)
484 goto err_out;
485 flags |= pgsz;
486
487 pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
488 i, da, pa, bytes);
489
490 iotlb_init_entry(&e, da, pa, flags);
491 err = iopgtable_store_entry(obj, &e);
492 if (err)
493 goto err_out;
494
495 da += bytes;
496 }
497 return 0;
498
499err_out:
500 da = new->da_start;
501
502 for_each_sg(sgt->sgl, sg, i, j) {
503 size_t bytes;
504
505 bytes = iopgtable_clear_entry(obj, da);
506
507 BUG_ON(!iopgsz_ok(bytes));
508
509 da += bytes;
510 }
511 return err;
512}
513
514/* release 'da' <-> 'pa' mapping */
515static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
516{
517 u32 start;
518 size_t total = area->da_end - area->da_start;
519
520 BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
521
522 start = area->da_start;
523 while (total > 0) {
524 size_t bytes;
525
526 bytes = iopgtable_clear_entry(obj, start);
527 if (bytes == 0)
528 bytes = PAGE_SIZE;
529 else
530 dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
531 __func__, start, bytes, area->flags);
532
533 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
534
535 total -= bytes;
536 start += bytes;
537 }
538 BUG_ON(total);
539}
540
541/* template function for all unmapping */
542static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
543 void (*fn)(const void *), u32 flags)
544{
545 struct sg_table *sgt = NULL;
546 struct iovm_struct *area;
547
548 if (!IS_ALIGNED(da, PAGE_SIZE)) {
549 dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
550 return NULL;
551 }
552
553 mutex_lock(&obj->mmap_lock);
554
555 area = __find_iovm_area(obj, da);
556 if (!area) {
557 dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
558 goto out;
559 }
560
561 if ((area->flags & flags) != flags) {
562 dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
563 area->flags);
564 goto out;
565 }
566 sgt = (struct sg_table *)area->sgt;
567
568 unmap_iovm_area(obj, area);
569
570 fn(area->va);
571
572 dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
573 area->da_start, da, area->da_end,
574 area->da_end - area->da_start, area->flags);
575
576 free_iovm_area(obj, area);
577out:
578 mutex_unlock(&obj->mmap_lock);
579
580 return sgt;
581}
582
583static u32 map_iommu_region(struct iommu *obj, u32 da,
584 const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
585{
586 int err = -ENOMEM;
587 struct iovm_struct *new;
588
589 mutex_lock(&obj->mmap_lock);
590
591 new = alloc_iovm_area(obj, da, bytes, flags);
592 if (IS_ERR(new)) {
593 err = PTR_ERR(new);
594 goto err_alloc_iovma;
595 }
596 new->va = va;
597 new->sgt = sgt;
598
599 if (map_iovm_area(obj, new, sgt, new->flags))
600 goto err_map;
601
602 mutex_unlock(&obj->mmap_lock);
603
604 dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
605 __func__, new->da_start, bytes, new->flags, va);
606
607 return new->da_start;
608
609err_map:
610 free_iovm_area(obj, new);
611err_alloc_iovma:
612 mutex_unlock(&obj->mmap_lock);
613 return err;
614}
615
616static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
617 const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
618{
619 return map_iommu_region(obj, da, sgt, va, bytes, flags);
620}
621
622/**
623 * iommu_vmap - (d)-(p)-(v) address mapper
624 * @obj: objective iommu
625 * @sgt: address of scatter gather table
626 * @flags: iovma and page property
627 *
628 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
629 * All @sgt elements must be io page size aligned.
630 */
631u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
632 u32 flags)
633{
634 size_t bytes;
635 void *va = NULL;
636
637 if (!obj || !obj->dev || !sgt)
638 return -EINVAL;
639
640 bytes = sgtable_len(sgt);
641 if (!bytes)
642 return -EINVAL;
643 bytes = PAGE_ALIGN(bytes);
644
645 if (flags & IOVMF_MMIO) {
646 va = vmap_sg(sgt);
647 if (IS_ERR(va))
648 return PTR_ERR(va);
649 }
650
651 flags |= IOVMF_DISCONT;
652 flags |= IOVMF_MMIO;
653
654 da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
655 if (IS_ERR_VALUE(da))
656 vunmap_sg(va);
657
658 return da;
659}
660EXPORT_SYMBOL_GPL(iommu_vmap);
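/*
 * Editor's sketch (hypothetical caller): the sg_table handed to
 * iommu_vmap() remains owned by the caller and is handed back by
 * iommu_vunmap() for the caller to free:
 *
 *	u32 da = iommu_vmap(obj, 0, sgt, IOVMF_MMIO);
 *	...
 *	sgt = iommu_vunmap(obj, da);	-- caller now frees sgt
 */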
661
662/**
663 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
664 * @obj: objective iommu
665 * @da: iommu device virtual address
666 *
667 * Free the iommu virtually contiguous memory area starting at
668 * @da, which was returned by 'iommu_vmap()'.
669 */
670struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
671{
672 struct sg_table *sgt;
673 /*
674	 * 'sgt' is allocated before 'iommu_vmap()' is called.
675	 * Just returns 'sgt' to the caller to free.
676 */
677 sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
678 if (!sgt)
679 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
680 return sgt;
681}
682EXPORT_SYMBOL_GPL(iommu_vunmap);
683
684/**
685 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
686 * @obj: objective iommu
687 * @da: contiguous iommu virtual memory
688 * @bytes: allocation size
689 * @flags: iovma and page property
690 *
691 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
692 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
693 */
694u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
695{
696 void *va;
697 struct sg_table *sgt;
698
699 if (!obj || !obj->dev || !bytes)
700 return -EINVAL;
701
702 bytes = PAGE_ALIGN(bytes);
703
704 va = vmalloc(bytes);
705 if (!va)
706 return -ENOMEM;
707
708 flags |= IOVMF_DISCONT;
709 flags |= IOVMF_ALLOC;
710
711 sgt = sgtable_alloc(bytes, flags, da, 0);
712 if (IS_ERR(sgt)) {
713 da = PTR_ERR(sgt);
714 goto err_sgt_alloc;
715 }
716 sgtable_fill_vmalloc(sgt, va);
717
718 da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
719 if (IS_ERR_VALUE(da))
720 goto err_iommu_vmap;
721
722 return da;
723
724err_iommu_vmap:
725 sgtable_drain_vmalloc(sgt);
726 sgtable_free(sgt);
727err_sgt_alloc:
728 vfree(va);
729 return da;
730}
731EXPORT_SYMBOL_GPL(iommu_vmalloc);
732
733/**
734 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
735 * @obj: objective iommu
736 * @da: iommu device virtual address
737 *
738 * Frees the iommu virtually contiguous memory area starting at
739 * @da, as obtained from 'iommu_vmalloc()'.
740 */
741void iommu_vfree(struct iommu *obj, const u32 da)
742{
743 struct sg_table *sgt;
744
745 sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
746 if (!sgt)
747 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
748 sgtable_free(sgt);
749}
750EXPORT_SYMBOL_GPL(iommu_vfree);
751
752static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
753 size_t bytes, u32 flags)
754{
755 struct sg_table *sgt;
756
757 sgt = sgtable_alloc(bytes, flags, da, pa);
758 if (IS_ERR(sgt))
759 return PTR_ERR(sgt);
760
761 sgtable_fill_kmalloc(sgt, pa, da, bytes);
762
763 da = map_iommu_region(obj, da, sgt, va, bytes, flags);
764 if (IS_ERR_VALUE(da)) {
765 sgtable_drain_kmalloc(sgt);
766 sgtable_free(sgt);
767 }
768
769 return da;
770}
771
772/**
773 * iommu_kmap - (d)-(p)-(v) address mapper
774 * @obj: objective iommu
775 * @da: contiguous iommu virtual memory
776 * @pa: contiguous physical memory
777 * @flags: iovma and page property
778 *
779 * Creates a 1-1-1 mapping and returns @da again, which can be
780 * adjusted if 'IOVMF_DA_FIXED' is not set.
781 */
782u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
783 u32 flags)
784{
785 void *va;
786
787 if (!obj || !obj->dev || !bytes)
788 return -EINVAL;
789
790 bytes = PAGE_ALIGN(bytes);
791
792 va = ioremap(pa, bytes);
793 if (!va)
794 return -ENOMEM;
795
796 flags |= IOVMF_LINEAR;
797 flags |= IOVMF_MMIO;
798
799 da = __iommu_kmap(obj, da, pa, va, bytes, flags);
800 if (IS_ERR_VALUE(da))
801 iounmap(va);
802
803 return da;
804}
805EXPORT_SYMBOL_GPL(iommu_kmap);
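/*
 * Editor's sketch: mapping a physically contiguous region 1-1-1 at a
 * fixed device address and releasing it again.  Both addresses are
 * made up for illustration:
 *
 *	u32 da = iommu_kmap(obj, 0x10000000, 0x88000000, SZ_64K,
 *			    IOVMF_DA_FIXED);
 *
 *	if (!IS_ERR_VALUE(da))
 *		iommu_kunmap(obj, da);
 */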
806
807/**
808 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
809 * @obj: objective iommu
810 * @da: iommu device virtual address
811 *
812 * Frees the iommu virtually contiguous memory area starting at
813 * @da, which was passed to and was returned by 'iommu_kmap()'.
814 */
815void iommu_kunmap(struct iommu *obj, u32 da)
816{
817 struct sg_table *sgt;
818 typedef void (*func_t)(const void *);
819
820 sgt = unmap_vm_area(obj, da, (func_t)iounmap,
821 IOVMF_LINEAR | IOVMF_MMIO);
822 if (!sgt)
823 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
824 sgtable_free(sgt);
825}
826EXPORT_SYMBOL_GPL(iommu_kunmap);
827
828/**
829 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
830 * @obj: objective iommu
831 * @da: contiguous iommu virtual memory
832 * @bytes: bytes for allocation
833 * @flags: iovma and page property
834 *
835 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
836 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
837 */
838u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
839{
840 void *va;
841 u32 pa;
842
843 if (!obj || !obj->dev || !bytes)
844 return -EINVAL;
845
846 bytes = PAGE_ALIGN(bytes);
847
848 va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
849 if (!va)
850 return -ENOMEM;
851 pa = virt_to_phys(va);
852
853 flags |= IOVMF_LINEAR;
854 flags |= IOVMF_ALLOC;
855
856 da = __iommu_kmap(obj, da, pa, va, bytes, flags);
857 if (IS_ERR_VALUE(da))
858 kfree(va);
859
860 return da;
861}
862EXPORT_SYMBOL_GPL(iommu_kmalloc);
863
864/**
865 * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
866 * @obj: objective iommu
867 * @da: iommu device virtual address
868 *
869 * Frees the iommu virtually contiguous memory area starting at
870 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
871 */
872void iommu_kfree(struct iommu *obj, u32 da)
873{
874 struct sg_table *sgt;
875
876 sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
877 if (!sgt)
878 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
879 sgtable_free(sgt);
880}
881EXPORT_SYMBOL_GPL(iommu_kfree);
882
883
884static int __init iovmm_init(void)
885{
886 const unsigned long flags = SLAB_HWCACHE_ALIGN;
887 struct kmem_cache *p;
888
889 p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
890 flags, NULL);
891 if (!p)
892 return -ENOMEM;
893 iovm_area_cachep = p;
894
895 return 0;
896}
897module_init(iovmm_init);
898
899static void __exit iovmm_exit(void)
900{
901 kmem_cache_destroy(iovm_area_cachep);
902}
903module_exit(iovmm_exit);
904
905MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
906MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
907MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 5587acf0eb2..6c62af10871 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -16,8 +16,6 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/wait.h>
20#include <linux/completion.h>
21#include <linux/interrupt.h> 19#include <linux/interrupt.h>
22#include <linux/err.h> 20#include <linux/err.h>
23#include <linux/clk.h> 21#include <linux/clk.h>
@@ -25,7 +23,6 @@
25#include <linux/io.h> 23#include <linux/io.h>
26#include <linux/slab.h> 24#include <linux/slab.h>
27 25
28#include <plat/dma.h>
29#include <plat/mcbsp.h> 26#include <plat/mcbsp.h>
30#include <plat/omap_device.h> 27#include <plat/omap_device.h>
31#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
@@ -136,8 +133,6 @@ static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
136 irqst_spcr2); 133 irqst_spcr2);
137 /* Writing zero to XSYNC_ERR clears the IRQ */ 134 /* Writing zero to XSYNC_ERR clears the IRQ */
138 MCBSP_WRITE(mcbsp_tx, SPCR2, MCBSP_READ_CACHE(mcbsp_tx, SPCR2)); 135 MCBSP_WRITE(mcbsp_tx, SPCR2, MCBSP_READ_CACHE(mcbsp_tx, SPCR2));
139 } else {
140 complete(&mcbsp_tx->tx_irq_completion);
141 } 136 }
142 137
143 return IRQ_HANDLED; 138 return IRQ_HANDLED;
@@ -156,41 +151,11 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
156 irqst_spcr1); 151 irqst_spcr1);
157 /* Writing zero to RSYNC_ERR clears the IRQ */ 152 /* Writing zero to RSYNC_ERR clears the IRQ */
158 MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); 153 MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
159 } else {
160 complete(&mcbsp_rx->rx_irq_completion);
161 } 154 }
162 155
163 return IRQ_HANDLED; 156 return IRQ_HANDLED;
164} 157}
165 158
166static void omap_mcbsp_tx_dma_callback(int lch, u16 ch_status, void *data)
167{
168 struct omap_mcbsp *mcbsp_dma_tx = data;
169
170 dev_dbg(mcbsp_dma_tx->dev, "TX DMA callback : 0x%x\n",
171 MCBSP_READ(mcbsp_dma_tx, SPCR2));
172
173 /* We can free the channels */
174 omap_free_dma(mcbsp_dma_tx->dma_tx_lch);
175 mcbsp_dma_tx->dma_tx_lch = -1;
176
177 complete(&mcbsp_dma_tx->tx_dma_completion);
178}
179
180static void omap_mcbsp_rx_dma_callback(int lch, u16 ch_status, void *data)
181{
182 struct omap_mcbsp *mcbsp_dma_rx = data;
183
184 dev_dbg(mcbsp_dma_rx->dev, "RX DMA callback : 0x%x\n",
185 MCBSP_READ(mcbsp_dma_rx, SPCR2));
186
187 /* We can free the channels */
188 omap_free_dma(mcbsp_dma_rx->dma_rx_lch);
189 mcbsp_dma_rx->dma_rx_lch = -1;
190
191 complete(&mcbsp_dma_rx->rx_dma_completion);
192}
193
194/* 159/*
195 * omap_mcbsp_config simply write a config to the 160 * omap_mcbsp_config simply write a config to the
196 * appropriate McBSP. 161 * appropriate McBSP.
@@ -758,37 +723,6 @@ static inline void omap_st_start(struct omap_mcbsp *mcbsp) {}
758static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {} 723static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {}
759#endif 724#endif
760 725
761/*
762 * We can choose between IRQ based or polled IO.
763 * This needs to be called before omap_mcbsp_request().
764 */
765int omap_mcbsp_set_io_type(unsigned int id, omap_mcbsp_io_type_t io_type)
766{
767 struct omap_mcbsp *mcbsp;
768
769 if (!omap_mcbsp_check_valid_id(id)) {
770 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
771 return -ENODEV;
772 }
773 mcbsp = id_to_mcbsp_ptr(id);
774
775 spin_lock(&mcbsp->lock);
776
777 if (!mcbsp->free) {
778 dev_err(mcbsp->dev, "McBSP%d is currently in use\n",
779 mcbsp->id);
780 spin_unlock(&mcbsp->lock);
781 return -EINVAL;
782 }
783
784 mcbsp->io_type = io_type;
785
786 spin_unlock(&mcbsp->lock);
787
788 return 0;
789}
790EXPORT_SYMBOL(omap_mcbsp_set_io_type);
791
792int omap_mcbsp_request(unsigned int id) 726int omap_mcbsp_request(unsigned int id)
793{ 727{
794 struct omap_mcbsp *mcbsp; 728 struct omap_mcbsp *mcbsp;
@@ -833,29 +767,24 @@ int omap_mcbsp_request(unsigned int id)
833 MCBSP_WRITE(mcbsp, SPCR1, 0); 767 MCBSP_WRITE(mcbsp, SPCR1, 0);
834 MCBSP_WRITE(mcbsp, SPCR2, 0); 768 MCBSP_WRITE(mcbsp, SPCR2, 0);
835 769
836 if (mcbsp->io_type == OMAP_MCBSP_IRQ_IO) { 770 err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler,
837 /* We need to get IRQs here */ 771 0, "McBSP", (void *)mcbsp);
838 init_completion(&mcbsp->tx_irq_completion); 772 if (err != 0) {
839 err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 773 dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
840 0, "McBSP", (void *)mcbsp); 774 "for McBSP%d\n", mcbsp->tx_irq,
775 mcbsp->id);
776 goto err_clk_disable;
777 }
778
779 if (mcbsp->rx_irq) {
780 err = request_irq(mcbsp->rx_irq,
781 omap_mcbsp_rx_irq_handler,
782 0, "McBSP", (void *)mcbsp);
841 if (err != 0) { 783 if (err != 0) {
842 dev_err(mcbsp->dev, "Unable to request TX IRQ %d " 784 dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
843 "for McBSP%d\n", mcbsp->tx_irq, 785 "for McBSP%d\n", mcbsp->rx_irq,
844 mcbsp->id); 786 mcbsp->id);
845 goto err_clk_disable; 787 goto err_free_irq;
846 }
847
848 if (mcbsp->rx_irq) {
849 init_completion(&mcbsp->rx_irq_completion);
850 err = request_irq(mcbsp->rx_irq,
851 omap_mcbsp_rx_irq_handler,
852 0, "McBSP", (void *)mcbsp);
853 if (err != 0) {
854 dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
855 "for McBSP%d\n", mcbsp->rx_irq,
856 mcbsp->id);
857 goto err_free_irq;
858 }
859 } 788 }
860 } 789 }
861 790
@@ -901,12 +830,9 @@ void omap_mcbsp_free(unsigned int id)
901 830
902 pm_runtime_put_sync(mcbsp->dev); 831 pm_runtime_put_sync(mcbsp->dev);
903 832
904 if (mcbsp->io_type == OMAP_MCBSP_IRQ_IO) { 833 if (mcbsp->rx_irq)
905 /* Free IRQs */ 834 free_irq(mcbsp->rx_irq, (void *)mcbsp);
906 if (mcbsp->rx_irq) 835 free_irq(mcbsp->tx_irq, (void *)mcbsp);
907 free_irq(mcbsp->rx_irq, (void *)mcbsp);
908 free_irq(mcbsp->tx_irq, (void *)mcbsp);
909 }
910 836
911 reg_cache = mcbsp->reg_cache; 837 reg_cache = mcbsp->reg_cache;
912 838
@@ -943,9 +869,6 @@ void omap_mcbsp_start(unsigned int id, int tx, int rx)
943 if (cpu_is_omap34xx()) 869 if (cpu_is_omap34xx())
944 omap_st_start(mcbsp); 870 omap_st_start(mcbsp);
945 871
946 mcbsp->rx_word_length = (MCBSP_READ_CACHE(mcbsp, RCR1) >> 5) & 0x7;
947 mcbsp->tx_word_length = (MCBSP_READ_CACHE(mcbsp, XCR1) >> 5) & 0x7;
948
949 /* Only enable SRG, if McBSP is master */ 872 /* Only enable SRG, if McBSP is master */
950 w = MCBSP_READ_CACHE(mcbsp, PCR0); 873 w = MCBSP_READ_CACHE(mcbsp, PCR0);
951 if (w & (FSXM | FSRM | CLKXM | CLKRM)) 874 if (w & (FSXM | FSRM | CLKXM | CLKRM))
@@ -1043,484 +966,32 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx)
1043} 966}
1044EXPORT_SYMBOL(omap_mcbsp_stop); 967EXPORT_SYMBOL(omap_mcbsp_stop);
1045 968
1046/* polled mcbsp i/o operations */
1047int omap_mcbsp_pollwrite(unsigned int id, u16 buf)
1048{
1049 struct omap_mcbsp *mcbsp;
1050
1051 if (!omap_mcbsp_check_valid_id(id)) {
1052 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
1053 return -ENODEV;
1054 }
1055
1056 mcbsp = id_to_mcbsp_ptr(id);
1057
1058 MCBSP_WRITE(mcbsp, DXR1, buf);
1059 /* if frame sync error - clear the error */
1060 if (MCBSP_READ(mcbsp, SPCR2) & XSYNC_ERR) {
1061 /* clear error */
1062 MCBSP_WRITE(mcbsp, SPCR2, MCBSP_READ_CACHE(mcbsp, SPCR2));
1063 /* resend */
1064 return -1;
1065 } else {
1066 /* wait for transmit confirmation */
1067		int attempts = 0;
1068		while (!(MCBSP_READ(mcbsp, SPCR2) & XRDY)) {
1069			if (attempts++ > 1000) {
1070 MCBSP_WRITE(mcbsp, SPCR2,
1071 MCBSP_READ_CACHE(mcbsp, SPCR2) &
1072 (~XRST));
1073 udelay(10);
1074 MCBSP_WRITE(mcbsp, SPCR2,
1075 MCBSP_READ_CACHE(mcbsp, SPCR2) |
1076 (XRST));
1077 udelay(10);
1078 dev_err(mcbsp->dev, "Could not write to"
1079 " McBSP%d Register\n", mcbsp->id);
1080 return -2;
1081 }
1082 }
1083 }
1084
1085 return 0;
1086}
1087EXPORT_SYMBOL(omap_mcbsp_pollwrite);
1088
1089int omap_mcbsp_pollread(unsigned int id, u16 *buf)
1090{
1091 struct omap_mcbsp *mcbsp;
1092
1093 if (!omap_mcbsp_check_valid_id(id)) {
1094 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
1095 return -ENODEV;
1096 }
1097 mcbsp = id_to_mcbsp_ptr(id);
1098
1099 /* if frame sync error - clear the error */
1100 if (MCBSP_READ(mcbsp, SPCR1) & RSYNC_ERR) {
1101 /* clear error */
1102 MCBSP_WRITE(mcbsp, SPCR1, MCBSP_READ_CACHE(mcbsp, SPCR1));
1103 /* resend */
1104 return -1;
1105 } else {
1106 /* wait for receive confirmation */
1107		int attempts = 0;
1108		while (!(MCBSP_READ(mcbsp, SPCR1) & RRDY)) {
1109			if (attempts++ > 1000) {
1110 MCBSP_WRITE(mcbsp, SPCR1,
1111 MCBSP_READ_CACHE(mcbsp, SPCR1) &
1112 (~RRST));
1113 udelay(10);
1114 MCBSP_WRITE(mcbsp, SPCR1,
1115 MCBSP_READ_CACHE(mcbsp, SPCR1) |
1116 (RRST));
1117 udelay(10);
1118 dev_err(mcbsp->dev, "Could not read from"
1119 " McBSP%d Register\n", mcbsp->id);
1120 return -2;
1121 }
1122 }
1123 }
1124 *buf = MCBSP_READ(mcbsp, DRR1);
1125
1126 return 0;
1127}
1128EXPORT_SYMBOL(omap_mcbsp_pollread);
1129
1130/*
1131 * IRQ based word transmission.
1132 */
1133void omap_mcbsp_xmit_word(unsigned int id, u32 word)
1134{
1135 struct omap_mcbsp *mcbsp;
1136 omap_mcbsp_word_length word_length;
1137
1138 if (!omap_mcbsp_check_valid_id(id)) {
1139 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
1140 return;
1141 }
1142
1143 mcbsp = id_to_mcbsp_ptr(id);
1144 word_length = mcbsp->tx_word_length;
1145
1146 wait_for_completion(&mcbsp->tx_irq_completion);
1147
1148 if (word_length > OMAP_MCBSP_WORD_16)
1149 MCBSP_WRITE(mcbsp, DXR2, word >> 16);
1150 MCBSP_WRITE(mcbsp, DXR1, word & 0xffff);
1151}
1152EXPORT_SYMBOL(omap_mcbsp_xmit_word);
1153
1154u32 omap_mcbsp_recv_word(unsigned int id)
1155{
1156 struct omap_mcbsp *mcbsp;
1157 u16 word_lsb, word_msb = 0;
1158 omap_mcbsp_word_length word_length;
1159
1160 if (!omap_mcbsp_check_valid_id(id)) {
1161 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
1162 return -ENODEV;
1163 }
1164 mcbsp = id_to_mcbsp_ptr(id);
1165
1166 word_length = mcbsp->rx_word_length;
1167
1168 wait_for_completion(&mcbsp->rx_irq_completion);
1169
1170 if (word_length > OMAP_MCBSP_WORD_16)
1171 word_msb = MCBSP_READ(mcbsp, DRR2);
1172 word_lsb = MCBSP_READ(mcbsp, DRR1);
1173
1174 return (word_lsb | (word_msb << 16));
1175}
1176EXPORT_SYMBOL(omap_mcbsp_recv_word);
1177
1178int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word)
1179{
1180 struct omap_mcbsp *mcbsp;
1181 omap_mcbsp_word_length tx_word_length;
1182 omap_mcbsp_word_length rx_word_length;
1183 u16 spcr2, spcr1, attempts = 0, word_lsb, word_msb = 0;
1184
1185 if (!omap_mcbsp_check_valid_id(id)) {
1186 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
1187 return -ENODEV;
1188 }
1189 mcbsp = id_to_mcbsp_ptr(id);
1190 tx_word_length = mcbsp->tx_word_length;
1191 rx_word_length = mcbsp->rx_word_length;
1192
1193 if (tx_word_length != rx_word_length)
1194 return -EINVAL;
1195
1196 /* First we wait for the transmitter to be ready */
1197 spcr2 = MCBSP_READ(mcbsp, SPCR2);
1198 while (!(spcr2 & XRDY)) {
1199 spcr2 = MCBSP_READ(mcbsp, SPCR2);
1200 if (attempts++ > 1000) {
1201 /* We must reset the transmitter */
1202 MCBSP_WRITE(mcbsp, SPCR2,
1203 MCBSP_READ_CACHE(mcbsp, SPCR2) & (~XRST));
1204 udelay(10);
1205 MCBSP_WRITE(mcbsp, SPCR2,
1206 MCBSP_READ_CACHE(mcbsp, SPCR2) | XRST);
1207 udelay(10);
1208 dev_err(mcbsp->dev, "McBSP%d transmitter not "
1209 "ready\n", mcbsp->id);
1210 return -EAGAIN;
1211 }
1212 }
1213
1214 /* Now we can push the data */
1215 if (tx_word_length > OMAP_MCBSP_WORD_16)
1216 MCBSP_WRITE(mcbsp, DXR2, word >> 16);
1217 MCBSP_WRITE(mcbsp, DXR1, word & 0xffff);
1218
1219 /* We wait for the receiver to be ready */
1220 spcr1 = MCBSP_READ(mcbsp, SPCR1);
1221 while (!(spcr1 & RRDY)) {
1222 spcr1 = MCBSP_READ(mcbsp, SPCR1);
1223 if (attempts++ > 1000) {
1224 /* We must reset the receiver */
1225 MCBSP_WRITE(mcbsp, SPCR1,
1226 MCBSP_READ_CACHE(mcbsp, SPCR1) & (~RRST));
1227 udelay(10);
1228 MCBSP_WRITE(mcbsp, SPCR1,
1229 MCBSP_READ_CACHE(mcbsp, SPCR1) | RRST);
1230 udelay(10);
1231 dev_err(mcbsp->dev, "McBSP%d receiver not "
1232 "ready\n", mcbsp->id);
1233 return -EAGAIN;
1234 }
1235 }
1236
1237 /* Receiver is ready, let's read the dummy data */
1238 if (rx_word_length > OMAP_MCBSP_WORD_16)
1239 word_msb = MCBSP_READ(mcbsp, DRR2);
1240 word_lsb = MCBSP_READ(mcbsp, DRR1);
1241
1242 return 0;
1243}
1244EXPORT_SYMBOL(omap_mcbsp_spi_master_xmit_word_poll);
1245
1246int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 *word)
1247{
1248 struct omap_mcbsp *mcbsp;
1249 u32 clock_word = 0;
1250 omap_mcbsp_word_length tx_word_length;
1251 omap_mcbsp_word_length rx_word_length;
1252 u16 spcr2, spcr1, attempts = 0, word_lsb, word_msb = 0;
1253
1254 if (!omap_mcbsp_check_valid_id(id)) {
1255 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
1256 return -ENODEV;
1257 }
1258
1259 mcbsp = id_to_mcbsp_ptr(id);
1260
1261 tx_word_length = mcbsp->tx_word_length;
1262 rx_word_length = mcbsp->rx_word_length;
1263
1264 if (tx_word_length != rx_word_length)
1265 return -EINVAL;
1266
1267 /* First we wait for the transmitter to be ready */
1268 spcr2 = MCBSP_READ(mcbsp, SPCR2);
1269 while (!(spcr2 & XRDY)) {
1270 spcr2 = MCBSP_READ(mcbsp, SPCR2);
1271 if (attempts++ > 1000) {
1272 /* We must reset the transmitter */
1273 MCBSP_WRITE(mcbsp, SPCR2,
1274 MCBSP_READ_CACHE(mcbsp, SPCR2) & (~XRST));
1275 udelay(10);
1276 MCBSP_WRITE(mcbsp, SPCR2,
1277 MCBSP_READ_CACHE(mcbsp, SPCR2) | XRST);
1278 udelay(10);
1279 dev_err(mcbsp->dev, "McBSP%d transmitter not "
1280 "ready\n", mcbsp->id);
1281 return -EAGAIN;
1282 }
1283 }
1284
1285 /* We first need to enable the bus clock */
1286 if (tx_word_length > OMAP_MCBSP_WORD_16)
1287 MCBSP_WRITE(mcbsp, DXR2, clock_word >> 16);
1288 MCBSP_WRITE(mcbsp, DXR1, clock_word & 0xffff);
1289
1290 /* We wait for the receiver to be ready */
1291 spcr1 = MCBSP_READ(mcbsp, SPCR1);
1292 while (!(spcr1 & RRDY)) {
1293 spcr1 = MCBSP_READ(mcbsp, SPCR1);
1294 if (attempts++ > 1000) {
1295 /* We must reset the receiver */
1296 MCBSP_WRITE(mcbsp, SPCR1,
1297 MCBSP_READ_CACHE(mcbsp, SPCR1) & (~RRST));
1298 udelay(10);
1299 MCBSP_WRITE(mcbsp, SPCR1,
1300 MCBSP_READ_CACHE(mcbsp, SPCR1) | RRST);
1301 udelay(10);
1302 dev_err(mcbsp->dev, "McBSP%d receiver not "
1303 "ready\n", mcbsp->id);
1304 return -EAGAIN;
1305 }
1306 }
1307
1308 /* Receiver is ready, there is something for us */
1309 if (rx_word_length > OMAP_MCBSP_WORD_16)
1310 word_msb = MCBSP_READ(mcbsp, DRR2);
1311 word_lsb = MCBSP_READ(mcbsp, DRR1);
1312
1313 word[0] = (word_lsb | (word_msb << 16));
1314
1315 return 0;
1316}
1317EXPORT_SYMBOL(omap_mcbsp_spi_master_recv_word_poll);
1318
1319/* 969/*
1320 * Simple DMA based buffer rx/tx routines. 970 * The following functions are only required on an OMAP1-only build.
1321 * Nothing fancy, just a single buffer tx/rx through DMA. 971 * mach-omap2/mcbsp.c contains the real functions
1322 * The DMA resources are released once the transfer is done.
1323 * For anything fancier, you should use your own customized DMA
1324 * routines and callbacks.
1325 */ 972 */
1326int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, 973#ifndef CONFIG_ARCH_OMAP2PLUS
1327 unsigned int length) 974int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id)
1328{ 975{
1329 struct omap_mcbsp *mcbsp; 976 WARN(1, "%s: should never be called on an OMAP1-only kernel\n",
1330 int dma_tx_ch; 977 __func__);
1331 int src_port = 0; 978 return -EINVAL;
1332 int dest_port = 0;
1333 int sync_dev = 0;
1334
1335 if (!omap_mcbsp_check_valid_id(id)) {
1336 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
1337 return -ENODEV;
1338 }
1339 mcbsp = id_to_mcbsp_ptr(id);
1340
1341 if (omap_request_dma(mcbsp->dma_tx_sync, "McBSP TX",
1342 omap_mcbsp_tx_dma_callback,
1343 mcbsp,
1344 &dma_tx_ch)) {
1345 dev_err(mcbsp->dev, " Unable to request DMA channel for "
1346 "McBSP%d TX. Trying IRQ based TX\n",
1347 mcbsp->id);
1348 return -EAGAIN;
1349 }
1350 mcbsp->dma_tx_lch = dma_tx_ch;
1351
1352 dev_err(mcbsp->dev, "McBSP%d TX DMA on channel %d\n", mcbsp->id,
1353 dma_tx_ch);
1354
1355 init_completion(&mcbsp->tx_dma_completion);
1356
1357 if (cpu_class_is_omap1()) {
1358 src_port = OMAP_DMA_PORT_TIPB;
1359 dest_port = OMAP_DMA_PORT_EMIFF;
1360 }
1361 if (cpu_class_is_omap2())
1362 sync_dev = mcbsp->dma_tx_sync;
1363
1364 omap_set_dma_transfer_params(mcbsp->dma_tx_lch,
1365 OMAP_DMA_DATA_TYPE_S16,
1366 length >> 1, 1,
1367 OMAP_DMA_SYNC_ELEMENT,
1368 sync_dev, 0);
1369
1370 omap_set_dma_dest_params(mcbsp->dma_tx_lch,
1371 src_port,
1372 OMAP_DMA_AMODE_CONSTANT,
1373 mcbsp->phys_base + OMAP_MCBSP_REG_DXR1,
1374 0, 0);
1375
1376 omap_set_dma_src_params(mcbsp->dma_tx_lch,
1377 dest_port,
1378 OMAP_DMA_AMODE_POST_INC,
1379 buffer,
1380 0, 0);
1381
1382 omap_start_dma(mcbsp->dma_tx_lch);
1383 wait_for_completion(&mcbsp->tx_dma_completion);
1384
1385 return 0;
1386} 979}
1387EXPORT_SYMBOL(omap_mcbsp_xmit_buffer);
1388 980
1389int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, 981void omap2_mcbsp1_mux_clkr_src(u8 mux)
1390 unsigned int length)
1391{ 982{
1392 struct omap_mcbsp *mcbsp; 983 WARN(1, "%s: should never be called on an OMAP1-only kernel\n",
1393 int dma_rx_ch; 984 __func__);
1394 int src_port = 0; 985 return;
1395 int dest_port = 0;
1396 int sync_dev = 0;
1397
1398 if (!omap_mcbsp_check_valid_id(id)) {
1399 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
1400 return -ENODEV;
1401 }
1402 mcbsp = id_to_mcbsp_ptr(id);
1403
1404 if (omap_request_dma(mcbsp->dma_rx_sync, "McBSP RX",
1405 omap_mcbsp_rx_dma_callback,
1406 mcbsp,
1407 &dma_rx_ch)) {
1408 dev_err(mcbsp->dev, "Unable to request DMA channel for "
1409 "McBSP%d RX. Trying IRQ based RX\n",
1410 mcbsp->id);
1411 return -EAGAIN;
1412 }
1413 mcbsp->dma_rx_lch = dma_rx_ch;
1414
1415 dev_err(mcbsp->dev, "McBSP%d RX DMA on channel %d\n", mcbsp->id,
1416 dma_rx_ch);
1417
1418 init_completion(&mcbsp->rx_dma_completion);
1419
1420 if (cpu_class_is_omap1()) {
1421 src_port = OMAP_DMA_PORT_TIPB;
1422 dest_port = OMAP_DMA_PORT_EMIFF;
1423 }
1424 if (cpu_class_is_omap2())
1425 sync_dev = mcbsp->dma_rx_sync;
1426
1427 omap_set_dma_transfer_params(mcbsp->dma_rx_lch,
1428 OMAP_DMA_DATA_TYPE_S16,
1429 length >> 1, 1,
1430 OMAP_DMA_SYNC_ELEMENT,
1431 sync_dev, 0);
1432
1433 omap_set_dma_src_params(mcbsp->dma_rx_lch,
1434 src_port,
1435 OMAP_DMA_AMODE_CONSTANT,
1436 mcbsp->phys_base + OMAP_MCBSP_REG_DRR1,
1437 0, 0);
1438
1439 omap_set_dma_dest_params(mcbsp->dma_rx_lch,
1440 dest_port,
1441 OMAP_DMA_AMODE_POST_INC,
1442 buffer,
1443 0, 0);
1444
1445 omap_start_dma(mcbsp->dma_rx_lch);
1446 wait_for_completion(&mcbsp->rx_dma_completion);
1447
1448 return 0;
1449} 986}
1450EXPORT_SYMBOL(omap_mcbsp_recv_buffer);
1451 987
1452/* 988void omap2_mcbsp1_mux_fsr_src(u8 mux)
1453 * SPI wrapper.
1454 * Since SPI setup is much simpler than the generic McBSP one,
1455 * this wrapper just needs an omap_mcbsp_spi_cfg structure as an input.
1456 * Once this is done, you can call omap_mcbsp_start().
1457 */
1458void omap_mcbsp_set_spi_mode(unsigned int id,
1459 const struct omap_mcbsp_spi_cfg *spi_cfg)
1460{ 989{
1461 struct omap_mcbsp *mcbsp; 990 WARN(1, "%s: should never be called on an OMAP1-only kernel\n",
1462 struct omap_mcbsp_reg_cfg mcbsp_cfg; 991 __func__);
1463 992 return;
1464 if (!omap_mcbsp_check_valid_id(id)) {
1465 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
1466 return;
1467 }
1468 mcbsp = id_to_mcbsp_ptr(id);
1469
1470 memset(&mcbsp_cfg, 0, sizeof(struct omap_mcbsp_reg_cfg));
1471
1472 /* SPI has only one frame */
1473 mcbsp_cfg.rcr1 |= (RWDLEN1(spi_cfg->word_length) | RFRLEN1(0));
1474 mcbsp_cfg.xcr1 |= (XWDLEN1(spi_cfg->word_length) | XFRLEN1(0));
1475
1476 /* Clock stop mode */
1477 if (spi_cfg->clk_stp_mode == OMAP_MCBSP_CLK_STP_MODE_NO_DELAY)
1478 mcbsp_cfg.spcr1 |= (1 << 12);
1479 else
1480 mcbsp_cfg.spcr1 |= (3 << 11);
1481
1482 /* Set clock parities */
1483 if (spi_cfg->rx_clock_polarity == OMAP_MCBSP_CLK_RISING)
1484 mcbsp_cfg.pcr0 |= CLKRP;
1485 else
1486 mcbsp_cfg.pcr0 &= ~CLKRP;
1487
1488 if (spi_cfg->tx_clock_polarity == OMAP_MCBSP_CLK_RISING)
1489 mcbsp_cfg.pcr0 &= ~CLKXP;
1490 else
1491 mcbsp_cfg.pcr0 |= CLKXP;
1492
1493 /* Set SCLKME to 0 and CLKSM to 1 */
1494 mcbsp_cfg.pcr0 &= ~SCLKME;
1495 mcbsp_cfg.srgr2 |= CLKSM;
1496
1497 /* Set FSXP */
1498 if (spi_cfg->fsx_polarity == OMAP_MCBSP_FS_ACTIVE_HIGH)
1499 mcbsp_cfg.pcr0 &= ~FSXP;
1500 else
1501 mcbsp_cfg.pcr0 |= FSXP;
1502
1503 if (spi_cfg->spi_mode == OMAP_MCBSP_SPI_MASTER) {
1504 mcbsp_cfg.pcr0 |= CLKXM;
1505 mcbsp_cfg.srgr1 |= CLKGDV(spi_cfg->clk_div - 1);
1506 mcbsp_cfg.pcr0 |= FSXM;
1507 mcbsp_cfg.srgr2 &= ~FSGM;
1508 mcbsp_cfg.xcr2 |= XDATDLY(1);
1509 mcbsp_cfg.rcr2 |= RDATDLY(1);
1510 } else {
1511 mcbsp_cfg.pcr0 &= ~CLKXM;
1512 mcbsp_cfg.srgr1 |= CLKGDV(1);
1513 mcbsp_cfg.pcr0 &= ~FSXM;
1514 mcbsp_cfg.xcr2 &= ~XDATDLY(3);
1515 mcbsp_cfg.rcr2 &= ~RDATDLY(3);
1516 }
1517
1518 mcbsp_cfg.xcr2 &= ~XPHASE;
1519 mcbsp_cfg.rcr2 &= ~RPHASE;
1520
1521 omap_mcbsp_config(id, &mcbsp_cfg);
1522} 993}
1523EXPORT_SYMBOL(omap_mcbsp_set_spi_mode); 994#endif
1524 995
1525#ifdef CONFIG_ARCH_OMAP3 996#ifdef CONFIG_ARCH_OMAP3
1526#define max_thres(m) (mcbsp->pdata->buffer_size) 997#define max_thres(m) (mcbsp->pdata->buffer_size)
@@ -1833,8 +1304,6 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
1833 spin_lock_init(&mcbsp->lock); 1304 spin_lock_init(&mcbsp->lock);
1834 mcbsp->id = id + 1; 1305 mcbsp->id = id + 1;
1835 mcbsp->free = true; 1306 mcbsp->free = true;
1836 mcbsp->dma_tx_lch = -1;
1837 mcbsp->dma_rx_lch = -1;
1838 1307
1839 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu"); 1308 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
1840 if (!res) { 1309 if (!res) {
@@ -1860,9 +1329,6 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
1860 else 1329 else
1861 mcbsp->phys_dma_base = res->start; 1330 mcbsp->phys_dma_base = res->start;
1862 1331
1863 /* Default I/O is IRQ based */
1864 mcbsp->io_type = OMAP_MCBSP_IRQ_IO;
1865
1866 mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx"); 1332 mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
1867 mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx"); 1333 mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
1868 1334
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 49fc0df0c21..02609eee056 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -236,61 +236,71 @@ static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat)
236 return 0; 236 return 0;
237} 237}
238 238
239static inline struct omap_device *_find_by_pdev(struct platform_device *pdev) 239static void _add_clkdev(struct omap_device *od, const char *clk_alias,
240 const char *clk_name)
240{ 241{
241 return container_of(pdev, struct omap_device, pdev); 242 struct clk *r;
243 struct clk_lookup *l;
244
245 if (!clk_alias || !clk_name)
246 return;
247
248 pr_debug("omap_device: %s: Creating %s -> %s\n",
249 dev_name(&od->pdev.dev), clk_alias, clk_name);
250
251 r = clk_get_sys(dev_name(&od->pdev.dev), clk_alias);
252 if (!IS_ERR(r)) {
253 pr_warning("omap_device: %s: alias %s already exists\n",
254 dev_name(&od->pdev.dev), clk_alias);
255 clk_put(r);
256 return;
257 }
258
259 r = omap_clk_get_by_name(clk_name);
260 if (IS_ERR(r)) {
261 pr_err("omap_device: %s: omap_clk_get_by_name for %s failed\n",
262 dev_name(&od->pdev.dev), clk_name);
263 return;
264 }
265
266 l = clkdev_alloc(r, clk_alias, dev_name(&od->pdev.dev));
267 if (!l) {
268 pr_err("omap_device: %s: clkdev_alloc for %s failed\n",
269 dev_name(&od->pdev.dev), clk_alias);
270 return;
271 }
272
273 clkdev_add(l);
242} 274}
243 275
244/** 276/**
245 * _add_optional_clock_clkdev - Add clkdev entry for hwmod optional clocks 277 * _add_hwmod_clocks_clkdev - Add clkdev entry for hwmod optional clocks
278 * and main clock
246 * @od: struct omap_device *od 279 * @od: struct omap_device *od
280 * @oh: struct omap_hwmod *oh
247 * 281 *
248 * For every optional clock present per hwmod per omap_device, this function 282 * For the main clock and every optional clock present per hwmod per
249 * adds an entry in the clkdev table of the form <dev-id=dev_name, con-id=role> 283 * omap_device, this function adds an entry in the clkdev table of the
250 * if it does not exist already. 284 * form <dev-id=dev_name, con-id=role> if it does not exist already.
251 * 285 *
252 * The function is called from inside omap_device_build_ss(), after 286 * The function is called from inside omap_device_build_ss(), after
253 * omap_device_register. 287 * omap_device_register.
254 * 288 *
255 * This allows drivers to get a pointer to its optional clocks based on its role 289 * This allows drivers to get a pointer to its optional clocks based on its role
256 * by calling clk_get(<dev*>, <role>). 290 * by calling clk_get(<dev*>, <role>).
291 * In the case of the main clock, a "fck" alias is used.
257 * 292 *
258 * No return value. 293 * No return value.
259 */ 294 */
260static void _add_optional_clock_clkdev(struct omap_device *od, 295static void _add_hwmod_clocks_clkdev(struct omap_device *od,
261 struct omap_hwmod *oh) 296 struct omap_hwmod *oh)
262{ 297{
263 int i; 298 int i;
264 299
265 for (i = 0; i < oh->opt_clks_cnt; i++) { 300 _add_clkdev(od, "fck", oh->main_clk);
266 struct omap_hwmod_opt_clk *oc;
267 struct clk *r;
268 struct clk_lookup *l;
269 301
270 oc = &oh->opt_clks[i]; 302 for (i = 0; i < oh->opt_clks_cnt; i++)
271 303 _add_clkdev(od, oh->opt_clks[i].role, oh->opt_clks[i].clk);
272 if (!oc->_clk)
273 continue;
274
275 r = clk_get_sys(dev_name(&od->pdev.dev), oc->role);
276 if (!IS_ERR(r))
277 continue; /* clkdev entry exists */
278
279 r = omap_clk_get_by_name((char *)oc->clk);
280 if (IS_ERR(r)) {
281 pr_err("omap_device: %s: omap_clk_get_by_name for %s failed\n",
282 dev_name(&od->pdev.dev), oc->clk);
283 continue;
284 }
285
286 l = clkdev_alloc(r, oc->role, dev_name(&od->pdev.dev));
287 if (!l) {
288 pr_err("omap_device: %s: clkdev_alloc for %s failed\n",
289 dev_name(&od->pdev.dev), oc->role);
290 return;
291 }
292 clkdev_add(l);
293 }
294} 304}
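/*
 * Editor's sketch (hypothetical driver code, not part of this patch):
 * once omap_device_build_ss() has run, a driver can reach its main
 * clock through the "fck" alias registered above:
 *
 *	struct clk *fck = clk_get(&pdev->dev, "fck");
 *
 *	if (!IS_ERR(fck))
 *		clk_put(fck);
 */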
295 305
296 306
@@ -316,7 +326,7 @@ u32 omap_device_get_context_loss_count(struct platform_device *pdev)
316 struct omap_device *od; 326 struct omap_device *od;
317 u32 ret = 0; 327 u32 ret = 0;
318 328
319 od = _find_by_pdev(pdev); 329 od = to_omap_device(pdev);
320 330
321 if (od->hwmods_cnt) 331 if (od->hwmods_cnt)
322 ret = omap_hwmod_get_context_loss_count(od->hwmods[0]); 332 ret = omap_hwmod_get_context_loss_count(od->hwmods[0]);
@@ -497,7 +507,7 @@ struct omap_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
497 507
498 for (i = 0; i < oh_cnt; i++) { 508 for (i = 0; i < oh_cnt; i++) {
499 hwmods[i]->od = od; 509 hwmods[i]->od = od;
500 _add_optional_clock_clkdev(od, hwmods[i]); 510 _add_hwmod_clocks_clkdev(od, hwmods[i]);
501 } 511 }
502 512
503 if (ret) 513 if (ret)
@@ -537,6 +547,7 @@ int omap_early_device_register(struct omap_device *od)
537 return 0; 547 return 0;
538} 548}
539 549
550#ifdef CONFIG_PM_RUNTIME
540static int _od_runtime_suspend(struct device *dev) 551static int _od_runtime_suspend(struct device *dev)
541{ 552{
542 struct platform_device *pdev = to_platform_device(dev); 553 struct platform_device *pdev = to_platform_device(dev);
@@ -563,13 +574,59 @@ static int _od_runtime_resume(struct device *dev)
563 574
564 return pm_generic_runtime_resume(dev); 575 return pm_generic_runtime_resume(dev);
565} 576}
577#endif
578
579#ifdef CONFIG_SUSPEND
580static int _od_suspend_noirq(struct device *dev)
581{
582 struct platform_device *pdev = to_platform_device(dev);
583 struct omap_device *od = to_omap_device(pdev);
584 int ret;
585
586 if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
587 return pm_generic_suspend_noirq(dev);
588
589 ret = pm_generic_suspend_noirq(dev);
590
591 if (!ret && !pm_runtime_status_suspended(dev)) {
592 if (pm_generic_runtime_suspend(dev) == 0) {
593 omap_device_idle(pdev);
594 od->flags |= OMAP_DEVICE_SUSPENDED;
595 }
596 }
597
598 return ret;
599}
600
601static int _od_resume_noirq(struct device *dev)
602{
603 struct platform_device *pdev = to_platform_device(dev);
604 struct omap_device *od = to_omap_device(pdev);
605
606 if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
607 return pm_generic_resume_noirq(dev);
608
609 if ((od->flags & OMAP_DEVICE_SUSPENDED) &&
610 !pm_runtime_status_suspended(dev)) {
611 od->flags &= ~OMAP_DEVICE_SUSPENDED;
612 omap_device_enable(pdev);
613 pm_generic_runtime_resume(dev);
614 }
615
616 return pm_generic_resume_noirq(dev);
617}
618#else
619#define _od_suspend_noirq NULL
620#define _od_resume_noirq NULL
621#endif
566 622
567static struct dev_power_domain omap_device_power_domain = { 623static struct dev_pm_domain omap_device_pm_domain = {
568 .ops = { 624 .ops = {
569 .runtime_suspend = _od_runtime_suspend, 625 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
570 .runtime_idle = _od_runtime_idle, 626 _od_runtime_idle)
571 .runtime_resume = _od_runtime_resume,
572 USE_PLATFORM_PM_SLEEP_OPS 627 USE_PLATFORM_PM_SLEEP_OPS
628 .suspend_noirq = _od_suspend_noirq,
629 .resume_noirq = _od_resume_noirq,
573 } 630 }
574}; 631};
575 632
@@ -586,7 +643,7 @@ int omap_device_register(struct omap_device *od)
586 pr_debug("omap_device: %s: registering\n", od->pdev.name); 643 pr_debug("omap_device: %s: registering\n", od->pdev.name);
587 644
588 od->pdev.dev.parent = &omap_device_parent; 645 od->pdev.dev.parent = &omap_device_parent;
589 od->pdev.dev.pwr_domain = &omap_device_power_domain; 646 od->pdev.dev.pm_domain = &omap_device_pm_domain;
590 return platform_device_register(&od->pdev); 647 return platform_device_register(&od->pdev);
591} 648}
592 649
@@ -611,7 +668,7 @@ int omap_device_enable(struct platform_device *pdev)
611 int ret; 668 int ret;
612 struct omap_device *od; 669 struct omap_device *od;
613 670
614 od = _find_by_pdev(pdev); 671 od = to_omap_device(pdev);
615 672
616 if (od->_state == OMAP_DEVICE_STATE_ENABLED) { 673 if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
617 WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n", 674 WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n",
@@ -650,7 +707,7 @@ int omap_device_idle(struct platform_device *pdev)
650 int ret; 707 int ret;
651 struct omap_device *od; 708 struct omap_device *od;
652 709
653 od = _find_by_pdev(pdev); 710 od = to_omap_device(pdev);
654 711
655 if (od->_state != OMAP_DEVICE_STATE_ENABLED) { 712 if (od->_state != OMAP_DEVICE_STATE_ENABLED) {
656 WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n", 713 WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n",
@@ -681,7 +738,7 @@ int omap_device_shutdown(struct platform_device *pdev)
681 int ret, i; 738 int ret, i;
682 struct omap_device *od; 739 struct omap_device *od;
683 740
684 od = _find_by_pdev(pdev); 741 od = to_omap_device(pdev);
685 742
686 if (od->_state != OMAP_DEVICE_STATE_ENABLED && 743 if (od->_state != OMAP_DEVICE_STATE_ENABLED &&
687 od->_state != OMAP_DEVICE_STATE_IDLE) { 744 od->_state != OMAP_DEVICE_STATE_IDLE) {
@@ -722,7 +779,7 @@ int omap_device_align_pm_lat(struct platform_device *pdev,
722 int ret = -EINVAL; 779 int ret = -EINVAL;
723 struct omap_device *od; 780 struct omap_device *od;
724 781
725 od = _find_by_pdev(pdev); 782 od = to_omap_device(pdev);
726 783
727 if (new_wakeup_lat_limit == od->dev_wakeup_lat) 784 if (new_wakeup_lat_limit == od->dev_wakeup_lat)
728 return 0; 785 return 0;
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 6af3d0b1f8d..363c91e44ef 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -394,20 +394,15 @@ void omap3_sram_restore_context(void)
394} 394}
395#endif /* CONFIG_PM */ 395#endif /* CONFIG_PM */
396 396
397static int __init omap34xx_sram_init(void) 397#endif /* CONFIG_ARCH_OMAP3 */
398{ 398
399 _omap3_sram_configure_core_dpll =
400 omap_sram_push(omap3_sram_configure_core_dpll,
401 omap3_sram_configure_core_dpll_sz);
402 omap_push_sram_idle();
403 return 0;
404}
405#else
406static inline int omap34xx_sram_init(void) 399static inline int omap34xx_sram_init(void)
407{ 400{
401#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
402 omap3_sram_restore_context();
403#endif
408 return 0; 404 return 0;
409} 405}
410#endif
411 406
412int __init omap_sram_init(void) 407int __init omap_sram_init(void)
413{ 408{