author		Olof Johansson <olof@lixom.net>	2012-09-20 22:57:38 -0400
committer	Olof Johansson <olof@lixom.net>	2012-09-20 22:57:38 -0400
commit		32dec75349da4e68b53f099ce3a96469cdc334d6 (patch)
tree		96aca7914b07697668ec716a3f80fb8c82e7a971 /arch
parent		740418ef19fd7def7b9c333435d4ca1dce28cc42 (diff)
parent		9891e3240543c45176b1298164418b61e8909ed0 (diff)
Merge tag 'tegra-for-3.7-dmaengine' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra into next/cleanup

ARM: tegra: switch to dmaengine

The Tegra code-base has contained both a legacy DMA and a dmaengine
driver since v3.6-rcX. This series flips Tegra's defconfig to enable
dmaengine rather than the legacy driver, and removes the legacy driver
and all client code.

* tag 'tegra-for-3.7-dmaengine' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra:
  ASoC: tegra: remove support of legacy DMA driver based access
  spi: tegra: remove support of legacy DMA driver based access
  ARM: tegra: apbio: remove support of legacy DMA driver based access
  ARM: tegra: dma: remove legacy APB DMA driver
  ARM: tegra: config: enable dmaengine based APB DMA driver

+ sync to 3.6-rc6
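For background, the removed legacy driver exposed a Tegra-private request API
(tegra_dma_allocate_channel()/tegra_dma_enqueue_req()); its replacement goes
through the kernel's generic dmaengine slave interface. Below is a minimal
sketch of a one-shot 4-byte memory-to-device transfer through that interface,
mirroring the pattern the reworked apbio.c in this series uses. It is an
illustration, not code from this merge: the function name example_dma_write
and the buf_phys/dev_addr parameters are hypothetical, and error handling is
reduced to the essentials.

#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(xfer_done);

/* dmaengine completion callbacks take an opaque void * argument. */
static void xfer_complete(void *arg)
{
	complete(&xfer_done);
}

/* One-shot slave write: 4-byte bounce buffer -> device register at dev_addr.
 * (Hypothetical helper for illustration; chan is assumed already requested.)
 */
static int example_dma_write(struct dma_chan *chan, dma_addr_t buf_phys,
			     dma_addr_t dev_addr)
{
	struct dma_slave_config sconfig = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= dev_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *desc;

	if (dmaengine_slave_config(chan, &sconfig))
		return -EINVAL;

	desc = dmaengine_prep_slave_single(chan, buf_phys, 4,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	desc->callback = xfer_complete;
	dmaengine_submit(desc);		/* queue the descriptor ... */
	dma_async_issue_pending(chan);	/* ... and start the engine */

	if (!wait_for_completion_timeout(&xfer_done, msecs_to_jiffies(50)))
		return -ETIMEDOUT;
	return 0;
}

Unlike the legacy tegra_dma_enqueue_req(), queueing (dmaengine_submit()) and
starting (dma_async_issue_pending()) are explicit, separate steps in the
dmaengine model.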
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/Kconfig.debug	6
-rw-r--r--	arch/arm/Makefile	4
-rw-r--r--	arch/arm/boot/compressed/head.S	4
-rw-r--r--	arch/arm/configs/tegra_defconfig	2
-rw-r--r--	arch/arm/include/asm/assembler.h	8
-rw-r--r--	arch/arm/include/asm/memory.h	3
-rw-r--r--	arch/arm/include/asm/tlb.h	4
-rw-r--r--	arch/arm/include/asm/uaccess.h	58
-rw-r--r--	arch/arm/kernel/hw_breakpoint.c	62
-rw-r--r--	arch/arm/kernel/traps.c	11
-rw-r--r--	arch/arm/lib/delay.c	1
-rw-r--r--	arch/arm/lib/getuser.S	23
-rw-r--r--	arch/arm/lib/putuser.S	6
-rw-r--r--	arch/arm/mach-imx/clk-imx25.c	6
-rw-r--r--	arch/arm/mach-imx/clk-imx35.c	6
-rw-r--r--	arch/arm/mach-omap2/Kconfig	3
-rw-r--r--	arch/arm/mach-omap2/Makefile	2
-rw-r--r--	arch/arm/mach-omap2/clock33xx_data.c	14
-rw-r--r--	arch/arm/mach-omap2/clockdomain2xxx_3xxx.c	50
-rw-r--r--	arch/arm/mach-omap2/cm-regbits-34xx.h	1
-rw-r--r--	arch/arm/mach-omap2/omap-wakeupgen.c	2
-rw-r--r--	arch/arm/mach-omap2/omap_hwmod.c	1
-rw-r--r--	arch/arm/mach-omap2/omap_hwmod_3xxx_data.c	15
-rw-r--r--	arch/arm/mach-omap2/omap_hwmod_44xx_data.c	12
-rw-r--r--	arch/arm/mach-omap2/timer.c	7
-rw-r--r--	arch/arm/mach-tegra/Kconfig	7
-rw-r--r--	arch/arm/mach-tegra/Makefile	1
-rw-r--r--	arch/arm/mach-tegra/apbio.c	118
-rw-r--r--	arch/arm/mach-tegra/dma.c	823
-rw-r--r--	arch/arm/mach-tegra/include/mach/dma.h	97
-rw-r--r--	arch/arm/mm/context.c	7
-rw-r--r--	arch/arm/mm/dma-mapping.c	2
-rw-r--r--	arch/arm/mm/mmu.c	4
-rw-r--r--	arch/arm/plat-omap/sram.c	11
-rw-r--r--	arch/blackfin/Kconfig	1
-rw-r--r--	arch/blackfin/Makefile	1
-rw-r--r--	arch/blackfin/include/asm/smp.h	2
-rw-r--r--	arch/blackfin/mach-common/smp.c	223
-rw-r--r--	arch/s390/oprofile/init.c	10
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	1
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_lbr.c	3
-rw-r--r--	arch/x86/kernel/microcode_core.c	3
-rw-r--r--	arch/x86/kvm/i8259.c	2
-rw-r--r--	arch/x86/kvm/vmx.c	23
-rw-r--r--	arch/x86/kvm/x86.c	13
45 files changed, 357 insertions, 1306 deletions
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f15f82bf3a50..e968a52e4881 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -356,15 +356,15 @@ choice
 	  is nothing connected to read from the DCC.
 
 	config DEBUG_SEMIHOSTING
-		bool "Kernel low-level debug output via semihosting I"
+		bool "Kernel low-level debug output via semihosting I/O"
 		help
 		  Semihosting enables code running on an ARM target to use
 		  the I/O facilities on a host debugger/emulator through a
-		  simple SVC calls. The host debugger or emulator must have
+		  simple SVC call. The host debugger or emulator must have
 		  semihosting enabled for the special svc call to be trapped
 		  otherwise the kernel will crash.
 
-		  This is known to work with OpenOCD, as wellas
+		  This is known to work with OpenOCD, as well as
 		  ARM's Fast Models, or any other controlling environment
 		  that implements semihosting.
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 210c923025b1..74381a31ee42 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -283,10 +283,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb:
+%.dtb: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-dtbs:
+dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index b8c64b80bafc..81769c1341fa 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_CPU_ENDIAN_BE8
 	orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
+	mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
 	orrne	r0, r0, #1		@ MMU enabled
 	movne	r1, #0xfffffffd		@ domain 0 = client
+	bic	r6, r6, #1 << 31	@ 32-bit translation system
+	bic	r6, r6, #3 << 0		@ use only ttbr0
 	mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 	mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
+	mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
 #endif
 	mcr	p15, 0, r0, c7, c5, 4	@ ISB
 	mcr	p15, 0, r0, c1, c0, 0	@ load control register
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index db2245353f0f..0d6bb738c6de 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -145,6 +145,8 @@ CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_EM3027=y
 CONFIG_RTC_DRV_TEGRA=y
+CONFIG_DMADEVICES=y
+CONFIG_TEGRA20_APB_DMA=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y
 CONFIG_SENSORS_ISL29028=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb93621d0d..5c8b3bf4d825 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -320,4 +320,12 @@
 	.size \name , . - \name
 	.endm
 
+	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+	adds	\tmp, \addr, #\size - 1
+	sbcccs	\tmp, \tmp, \limit
+	bcs	\bad
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b560f1..5f6ddcc56452 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */
 
 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d4664eae7..99a19512ee26 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 
+#ifdef CONFIG_ARM_LPAE
+	tlb_add_flush(tlb, addr);
+#else
 	/*
 	 * With the classic ARM MMU, a pte page has two corresponding pmd
 	 * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	addr &= PMD_MASK;
 	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
 	tlb_add_flush(tlb, addr + SZ_1M);
+#endif
 
 	tlb_remove_page(tlb, pte);
 }
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a6352e0b5..77bd79f2ffdb 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
 
-#define __get_user_x(__r2,__p,__e,__s,__i...)				\
+#define __GUP_CLOBBER_1	"lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2	"ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4	"lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__get_user_" #__s				\
 		: "=&r" (__e), "=r" (__r2)				\
-		: "0" (__p)						\
-		: __i, "cc")
+		: "0" (__p), "r" (__l)					\
+		: __GUP_CLOBBER_##__s)
 
-#define get_user(x,p)							\
+#define __get_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
 		register unsigned long __r2 asm("r2");			\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__get_user_x(__r2, __p, __e, 1, "lr");		\
+			__get_user_x(__r2, __p, __e, __l, 1);		\
 			break;						\
 		case 2:							\
-			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\
+			__get_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__get_user_x(__r2, __p, __e, 4, "lr");		\
+			__get_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
 		__e;							\
 	})
 
+#define get_user(x,p)							\
+	({								\
+		might_fault();						\
+		__get_user_check(x,p);					\
+	 })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__s)					\
+#define __put_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__put_user_" #__s				\
 		: "=&r" (__e)						\
-		: "0" (__p), "r" (__r2)					\
+		: "0" (__p), "r" (__r2), "r" (__l)			\
 		: "ip", "lr", "cc")
 
-#define put_user(x,p)							\
+#define __put_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__put_user_x(__r2, __p, __e, 1);		\
+			__put_user_x(__r2, __p, __e, __l, 1);		\
 			break;						\
 		case 2:							\
-			__put_user_x(__r2, __p, __e, 2);		\
+			__put_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__put_user_x(__r2, __p, __e, 4);		\
+			__put_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		case 8:							\
-			__put_user_x(__r2, __p, __e, 8);		\
+			__put_user_x(__r2, __p, __e, __l, 8);		\
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
 		__e;							\
 	})
 
+#define put_user(x,p)							\
+	({								\
+		might_fault();						\
+		__put_user_check(x,p);					\
+	 })
+
 #else /* CONFIG_MMU */
 
 /*
@@ -219,6 +245,7 @@ do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
@@ -300,6 +327,7 @@ do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
 	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index ba386bd94107..281bf3301241 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
 		arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+	return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 		/* Aligned */
 		break;
 	case 1:
-		/* Allow single byte watchpoint. */
-		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-			break;
 	case 2:
 		/* Allow halfword watchpoints and breakpoints. */
 		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
 			break;
+	case 3:
+		/* Allow single byte watchpoint. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+			break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	info->address &= ~alignment_mask;
 	info->ctrl.len <<= offset;
 
-	/*
-	 * Currently we rely on an overflow handler to take
-	 * care of single-stepping the breakpoint when it fires.
-	 * In the case of userspace breakpoints on a core with V7 debug,
-	 * we can use the mismatch feature as a poor-man's hardware
-	 * single-step, but this only works for per-task breakpoints.
-	 */
-	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-		pr_warning("overflow handler required but none found\n");
-		ret = -EINVAL;
+	if (!bp->overflow_handler) {
+		/*
+		 * Mismatch breakpoints are required for single-stepping
+		 * breakpoints.
+		 */
+		if (!core_has_mismatch_brps())
+			return -EINVAL;
+
+		/* We don't allow mismatch breakpoints in kernel space. */
+		if (arch_check_bp_in_kernelspace(bp))
+			return -EPERM;
+
+		/*
+		 * Per-cpu breakpoints are not supported by our stepping
+		 * mechanism.
+		 */
+		if (!bp->hw.bp_target)
+			return -EINVAL;
+
+		/*
+		 * We only support specific access types if the fsr
+		 * reports them.
+		 */
+		if (!debug_exception_updates_fsr() &&
+		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+		     info->ctrl.type == ARM_BREAKPOINT_STORE))
+			return -EINVAL;
 	}
+
 out:
 	return ret;
 }
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 			goto unlock;
 
 		/* Check that the access type matches. */
-		access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-			 HW_BREAKPOINT_R;
-		if (!(access & hw_breakpoint_type(wp)))
-			goto unlock;
+		if (debug_exception_updates_fsr()) {
+			access = (fsr & ARM_FSR_ACCESS_MASK) ?
+				  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+			if (!(access & hw_breakpoint_type(wp)))
+				goto unlock;
+		}
 
 		/* We have a winner. */
 		info->trigger = addr;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f7945218b8c6..b0179b89a04c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
 		instr = *(u32 *) pc;
 	} else if (thumb_mode(regs)) {
-		get_user(instr, (u16 __user *)pc);
+		if (get_user(instr, (u16 __user *)pc))
+			goto die_sig;
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
-			get_user(instr2, (u16 __user *)pc+1);
+			if (get_user(instr2, (u16 __user *)pc+1))
+				goto die_sig;
 			instr <<= 16;
 			instr |= instr2;
 		}
-	} else {
-		get_user(instr, (u32 __user *)pc);
+	} else if (get_user(instr, (u32 __user *)pc)) {
+		goto die_sig;
 	}
 
 	if (call_undef_hook(regs, instr) == 0)
 		return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index d6dacc69254e..395d5fbb8fa2 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
 {
 	pr_info("Switching to timer-based delay loop\n");
 	lpj_fine = freq / HZ;
+	loops_per_jiffy = lpj_fine;
 	arm_delay_ops.delay		= __timer_delay;
 	arm_delay_ops.const_udelay	= __timer_const_udelay;
 	arm_delay_ops.udelay		= __timer_udelay;
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 11093a7c3e32..9b06bb41fca6 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -16,8 +16,9 @@
  * __get_user_X
  *
  * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
  * Outputs:	r0 is the error code
- *		r2, r3 contains the zero-extended value
+ *		r2 contains the zero-extended value
  *		lr corrupted
  *
  * No other registers must be altered.  (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
  * Note also that it is intended that __get_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
+	check_uaccess r0, 1, r1, r2, __get_user_bad
 1: TUSER(ldrb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb)	r2, [r0]
-3: TUSER(ldrb)	r3, [r0, #1]
+	check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+2:	ldrbt	r2, [r0], #1
+3:	ldrbt	rb, [r0], #0
 #else
-2: TUSER(ldrb)	r2, [r0], #1
-3: TUSER(ldrb)	r3, [r0]
+rb	.req	r0
+2:	ldrb	r2, [r0]
+3:	ldrb	rb, [r0, #1]
 #endif
 #ifndef __ARMEB__
-	orr	r2, r2, r3, lsl #8
+	orr	r2, r2, rb, lsl #8
 #else
-	orr	r2, r3, r2, lsl #8
+	orr	r2, rb, r2, lsl #8
 #endif
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
+	check_uaccess r0, 4, r1, r2, __get_user_bad
 4: TUSER(ldr)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 7db25990c589..3d73dcb959b0 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -16,6 +16,7 @@
  * __put_user_X
  *
  * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
  *		r2, r3 contains the value
  * Outputs:	r0 is the error code
  *		lr corrupted
@@ -27,16 +28,19 @@
  * Note also that it is intended that __put_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
+	check_uaccess r0, 1, r1, ip, __put_user_bad
 1: TUSER(strb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
+	check_uaccess r0, 2, r1, ip, __put_user_bad
 	mov	ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
+	check_uaccess r0, 4, r1, ip, __put_user_bad
 4: TUSER(str)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
+	check_uaccess r0, 8, r1, ip, __put_user_bad
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(str)	r2, [r0]
 6: TUSER(str)	r3, [r0, #4]
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index fdd8cc87c9fe..4431a62fff5b 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
 	clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
 	clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
 	clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
-	clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
 	clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index c6422fb10bae..65fb8bcd86cb 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
 	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
 	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
 	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
 	/* i.mx35 has the i.mx21 type uart */
 	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
 	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index fcd4e85c4ddc..346fd26f3aa6 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -232,10 +232,11 @@ config MACH_OMAP3_PANDORA
 	select OMAP_PACKAGE_CBB
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
 	bool "OMAP3 Touch Book"
 	depends on ARCH_OMAP3
 	default y
+	select OMAP_PACKAGE_CBB
 
 config MACH_OMAP_3430SDP
 	bool "OMAP 3430 SDP board"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index eb203ec193d0..7706fdfd0252 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -235,7 +235,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o
 obj-$(CONFIG_MACH_CM_T35)		+= board-cm-t35.o
 obj-$(CONFIG_MACH_CM_T3517)		+= board-cm-t3517.o
 obj-$(CONFIG_MACH_IGEP0020)		+= board-igep0020.o
-obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK)	+= board-omap3touchbook.o
+obj-$(CONFIG_MACH_TOUCHBOOK)		+= board-omap3touchbook.o
 obj-$(CONFIG_MACH_OMAP_4430SDP)		+= board-4430sdp.o
 obj-$(CONFIG_MACH_OMAP4_PANDA)		+= board-omap4panda.o
 
diff --git a/arch/arm/mach-omap2/clock33xx_data.c b/arch/arm/mach-omap2/clock33xx_data.c
index 7aa5ecaee5a2..8e06de665b14 100644
--- a/arch/arm/mach-omap2/clock33xx_data.c
+++ b/arch/arm/mach-omap2/clock33xx_data.c
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
 	CLK(NULL,	"mmu_fck",		&mmu_fck,	CK_AM33XX),
 	CLK(NULL,	"smartreflex0_fck",	&smartreflex0_fck,	CK_AM33XX),
 	CLK(NULL,	"smartreflex1_fck",	&smartreflex1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt1_fck",		&timer1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt2_fck",		&timer2_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt3_fck",		&timer3_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt4_fck",		&timer4_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt5_fck",		&timer5_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt6_fck",		&timer6_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt7_fck",		&timer7_fck,	CK_AM33XX),
+	CLK(NULL,	"timer1_fck",		&timer1_fck,	CK_AM33XX),
+	CLK(NULL,	"timer2_fck",		&timer2_fck,	CK_AM33XX),
+	CLK(NULL,	"timer3_fck",		&timer3_fck,	CK_AM33XX),
+	CLK(NULL,	"timer4_fck",		&timer4_fck,	CK_AM33XX),
+	CLK(NULL,	"timer5_fck",		&timer5_fck,	CK_AM33XX),
+	CLK(NULL,	"timer6_fck",		&timer6_fck,	CK_AM33XX),
+	CLK(NULL,	"timer7_fck",		&timer7_fck,	CK_AM33XX),
 	CLK(NULL,	"usbotg_fck",		&usbotg_fck,	CK_AM33XX),
 	CLK(NULL,	"ieee5000_fck",		&ieee5000_fck,	CK_AM33XX),
 	CLK(NULL,	"wdt1_fck",		&wdt1_fck,	CK_AM33XX),
diff --git a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
index a0d68dbecfa3..f99e65cfb862 100644
--- a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
 	_clkdm_del_autodeps(clkdm);
 }
 
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_add_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+			omap3_clkdm_wakeup(clkdm);
+	}
+
+	return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_del_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+			omap3_clkdm_sleep(clkdm);
+	}
+
+	return 0;
+}
+
 struct clkdm_ops omap2_clkdm_operations = {
 	.clkdm_add_wkdep	= omap2_clkdm_add_wkdep,
 	.clkdm_del_wkdep	= omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
 	.clkdm_wakeup		= omap3_clkdm_wakeup,
 	.clkdm_allow_idle	= omap3_clkdm_allow_idle,
 	.clkdm_deny_idle	= omap3_clkdm_deny_idle,
-	.clkdm_clk_enable	= omap2_clkdm_clk_enable,
-	.clkdm_clk_disable	= omap2_clkdm_clk_disable,
+	.clkdm_clk_enable	= omap3xxx_clkdm_clk_enable,
+	.clkdm_clk_disable	= omap3xxx_clkdm_clk_disable,
 };
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 766338fe4d34..975f6bda0e0b 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -67,6 +67,7 @@
 #define OMAP3430_EN_IVA2_DPLL_MASK			(0x7 << 0)
 
 /* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT				0
 #define OMAP3430_ST_IVA2_MASK				(1 << 0)
 
 /* CM_IDLEST_PLL_IVA2 */
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index b54427dec2a3..ecaad7d371ee 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -47,7 +47,7 @@
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_SPINLOCK(wakeupgen_lock);
-static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_target_cpu[MAX_IRQS];
 static unsigned int irq_banks = MAX_NR_REG_BANKS;
 static unsigned int max_irqs = MAX_IRQS;
 static unsigned int omap_secure_apis;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 80b7359500f1..3615e0d9ee3c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh)
 			_enable_sysc(oh);
 		}
 	} else {
+		_omap4_disable_module(oh);
 		_disable_clocks(oh);
 		pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
 			 oh->name, r);
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index a1df9d4690f7..b1675e6214d3 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = {
 
 /* IVA2 (IVA2) */
 static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = {
-	{ .name = "logic", .rst_shift = 0 },
-	{ .name = "seq0", .rst_shift = 1 },
-	{ .name = "seq1", .rst_shift = 2 },
+	{ .name = "logic", .rst_shift = 0, .st_shift = 8 },
+	{ .name = "seq0", .rst_shift = 1, .st_shift = 9 },
+	{ .name = "seq1", .rst_shift = 2, .st_shift = 10 },
 };
 
 static struct omap_hwmod omap3xxx_iva_hwmod = {
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
 	.rst_lines	= omap3xxx_iva_resets,
 	.rst_lines_cnt	= ARRAY_SIZE(omap3xxx_iva_resets),
 	.main_clk	= "iva2_ck",
+	.prcm = {
+		.omap2 = {
+			.module_offs = OMAP3430_IVA2_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
+		}
+	},
 };
 
 /* timer class */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index f033f950a232..f9bcb24cd515 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -4209,7 +4209,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
 };
 
 /* dsp -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
 	.master		= &omap44xx_dsp_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "dpll_iva_m5x2_ck",
@@ -4827,7 +4827,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
 };
 
 /* iva -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = {
 	.master		= &omap44xx_iva_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "dpll_iva_m5x2_ck",
@@ -5361,7 +5361,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
 };
 
 /* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
 	.master		= &omap44xx_l3_main_2_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "l3_div_ck",
@@ -6031,7 +6031,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_abe__dmic,
 	&omap44xx_l4_abe__dmic_dma,
 	&omap44xx_dsp__iva,
-	&omap44xx_dsp__sl2if,
+	/* &omap44xx_dsp__sl2if, */
 	&omap44xx_l4_cfg__dsp,
 	&omap44xx_l3_main_2__dss,
 	&omap44xx_l4_per__dss,
@@ -6067,7 +6067,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_per__i2c4,
 	&omap44xx_l3_main_2__ipu,
 	&omap44xx_l3_main_2__iss,
-	&omap44xx_iva__sl2if,
+	/* &omap44xx_iva__sl2if, */
 	&omap44xx_l3_main_2__iva,
 	&omap44xx_l4_wkup__kbd,
 	&omap44xx_l4_cfg__mailbox,
@@ -6098,7 +6098,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_cfg__cm_core,
 	&omap44xx_l4_wkup__prm,
 	&omap44xx_l4_wkup__scrm,
-	&omap44xx_l3_main_2__sl2if,
+	/* &omap44xx_l3_main_2__sl2if, */
 	&omap44xx_l4_abe__slimbus1,
 	&omap44xx_l4_abe__slimbus1_dma,
 	&omap44xx_l4_per__slimbus2,
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index e17cf974d16c..5214d5bfba27 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -262,6 +262,7 @@ static u32 notrace dmtimer_read_sched_clock(void)
 	return 0;
 }
 
+#ifdef CONFIG_OMAP_32K_TIMER
 /* Setup free-running counter for clocksource */
 static int __init omap2_sync32k_clocksource_init(void)
 {
@@ -301,6 +302,12 @@ static int __init omap2_sync32k_clocksource_init(void)
 
 	return ret;
 }
+#else
+static inline int omap2_sync32k_clocksource_init(void)
+{
+	return -ENODEV;
+}
+#endif
 
 static void __init omap2_gptimer_clocksource_init(int gptimer_id,
 						  const char *fck_source)
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index b3226f80c985..5f3c03b61f8e 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -110,13 +110,6 @@ config TEGRA_DEBUG_UART_AUTO_SCRATCH
 
 endchoice
 
-config TEGRA_SYSTEM_DMA
-	bool "Enable system DMA driver for NVIDIA Tegra SoCs"
-	default y
-	help
-	  Adds system DMA functionality for NVIDIA Tegra SoCs, used by
-	  several Tegra device drivers
-
 config TEGRA_EMC_SCALING_ENABLE
 	bool "Enable scaling the memory frequency"
 
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 56065acbd816..0974ace45558 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30_clocks.o
 obj-$(CONFIG_SMP)			+= platsmp.o headsmp.o
 obj-$(CONFIG_SMP)			+= reset.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
-obj-$(CONFIG_TEGRA_SYSTEM_DMA)		+= dma.o
 obj-$(CONFIG_CPU_FREQ)			+= cpu-tegra.o
 obj-$(CONFIG_TEGRA_PCI)			+= pcie.o
 obj-$(CONFIG_USB_SUPPORT)		+= usb_phy.o
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c
index 643a37809a15..b5015d0f1912 100644
--- a/arch/arm/mach-tegra/apbio.c
+++ b/arch/arm/mach-tegra/apbio.c
@@ -28,7 +28,7 @@
 
 #include "apbio.h"
 
-#if defined(CONFIG_TEGRA_SYSTEM_DMA) || defined(CONFIG_TEGRA20_APB_DMA)
+#if defined(CONFIG_TEGRA20_APB_DMA)
 static DEFINE_MUTEX(tegra_apb_dma_lock);
 static u32 *tegra_apb_bb;
 static dma_addr_t tegra_apb_bb_phys;
@@ -37,121 +37,6 @@ static DECLARE_COMPLETION(tegra_apb_wait);
 static u32 tegra_apb_readl_direct(unsigned long offset);
 static void tegra_apb_writel_direct(u32 value, unsigned long offset);
 
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
-static struct tegra_dma_channel *tegra_apb_dma;
-
-bool tegra_apb_init(void)
-{
-	struct tegra_dma_channel *ch;
-
-	mutex_lock(&tegra_apb_dma_lock);
-
-	/* Check to see if we raced to setup */
-	if (tegra_apb_dma)
-		goto out;
-
-	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
-					TEGRA_DMA_SHARED);
-
-	if (!ch)
-		goto out_fail;
-
-	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
-		&tegra_apb_bb_phys, GFP_KERNEL);
-	if (!tegra_apb_bb) {
-		pr_err("%s: can not allocate bounce buffer\n", __func__);
-		tegra_dma_free_channel(ch);
-		goto out_fail;
-	}
-
-	tegra_apb_dma = ch;
-out:
-	mutex_unlock(&tegra_apb_dma_lock);
-	return true;
-
-out_fail:
-	mutex_unlock(&tegra_apb_dma_lock);
-	return false;
-}
-
-static void apb_dma_complete(struct tegra_dma_req *req)
-{
-	complete(&tegra_apb_wait);
-}
-
-static u32 tegra_apb_readl_using_dma(unsigned long offset)
-{
-	struct tegra_dma_req req;
-	int ret;
-
-	if (!tegra_apb_dma && !tegra_apb_init())
-		return tegra_apb_readl_direct(offset);
-
-	mutex_lock(&tegra_apb_dma_lock);
-	req.complete = apb_dma_complete;
-	req.to_memory = 1;
-	req.dest_addr = tegra_apb_bb_phys;
-	req.dest_bus_width = 32;
-	req.dest_wrap = 1;
-	req.source_addr = offset;
-	req.source_bus_width = 32;
-	req.source_wrap = 4;
-	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
-	req.size = 4;
-
-	INIT_COMPLETION(tegra_apb_wait);
-
-	tegra_dma_enqueue_req(tegra_apb_dma, &req);
-
-	ret = wait_for_completion_timeout(&tegra_apb_wait,
-		msecs_to_jiffies(50));
-
-	if (WARN(ret == 0, "apb read dma timed out")) {
-		tegra_dma_dequeue_req(tegra_apb_dma, &req);
-		*(u32 *)tegra_apb_bb = 0;
-	}
-
-	mutex_unlock(&tegra_apb_dma_lock);
-	return *((u32 *)tegra_apb_bb);
-}
-
-static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
-{
-	struct tegra_dma_req req;
-	int ret;
-
-	if (!tegra_apb_dma && !tegra_apb_init()) {
-		tegra_apb_writel_direct(value, offset);
-		return;
-	}
-
-	mutex_lock(&tegra_apb_dma_lock);
-	*((u32 *)tegra_apb_bb) = value;
-	req.complete = apb_dma_complete;
-	req.to_memory = 0;
-	req.dest_addr = offset;
-	req.dest_wrap = 4;
-	req.dest_bus_width = 32;
-	req.source_addr = tegra_apb_bb_phys;
-	req.source_bus_width = 32;
-	req.source_wrap = 1;
-	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
-	req.size = 4;
-
-	INIT_COMPLETION(tegra_apb_wait);
-
-	tegra_dma_enqueue_req(tegra_apb_dma, &req);
-
-	ret = wait_for_completion_timeout(&tegra_apb_wait,
-		msecs_to_jiffies(50));
-
-	if (WARN(ret == 0, "apb write dma timed out"))
-		tegra_dma_dequeue_req(tegra_apb_dma, &req);
-
-	mutex_unlock(&tegra_apb_dma_lock);
-}
-
-#else
 static struct dma_chan *tegra_apb_dma_chan;
 static struct dma_slave_config dma_sconfig;
 
@@ -279,7 +164,6 @@ static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
 	pr_err("error in writing offset 0x%08lx using dma\n", offset);
 	mutex_unlock(&tegra_apb_dma_lock);
 }
-#endif
 #else
 #define tegra_apb_readl_using_dma tegra_apb_readl_direct
 #define tegra_apb_writel_using_dma tegra_apb_writel_direct
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
deleted file mode 100644
index 29c5114d607c..000000000000
--- a/arch/arm/mach-tegra/dma.c
+++ /dev/null
@@ -1,823 +0,0 @@
1/*
2 * arch/arm/mach-tegra/dma.c
3 *
4 * System DMA driver for NVIDIA Tegra SoCs
5 *
6 * Copyright (c) 2008-2009, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23#include <linux/io.h>
24#include <linux/interrupt.h>
25#include <linux/module.h>
26#include <linux/spinlock.h>
27#include <linux/err.h>
28#include <linux/irq.h>
29#include <linux/delay.h>
30#include <linux/clk.h>
31#include <mach/dma.h>
32#include <mach/irqs.h>
33#include <mach/iomap.h>
34#include <mach/suspend.h>
35
36#include "apbio.h"
37
38#define APB_DMA_GEN 0x000
39#define GEN_ENABLE (1<<31)
40
41#define APB_DMA_CNTRL 0x010
42
43#define APB_DMA_IRQ_MASK 0x01c
44
45#define APB_DMA_IRQ_MASK_SET 0x020
46
47#define APB_DMA_CHAN_CSR 0x000
48#define CSR_ENB (1<<31)
49#define CSR_IE_EOC (1<<30)
50#define CSR_HOLD (1<<29)
51#define CSR_DIR (1<<28)
52#define CSR_ONCE (1<<27)
53#define CSR_FLOW (1<<21)
54#define CSR_REQ_SEL_SHIFT 16
55#define CSR_WCOUNT_SHIFT 2
56#define CSR_WCOUNT_MASK 0xFFFC
57
58#define APB_DMA_CHAN_STA 0x004
59#define STA_BUSY (1<<31)
60#define STA_ISE_EOC (1<<30)
61#define STA_HALT (1<<29)
62#define STA_PING_PONG (1<<28)
63#define STA_COUNT_SHIFT 2
64#define STA_COUNT_MASK 0xFFFC
65
66#define APB_DMA_CHAN_AHB_PTR 0x010
67
68#define APB_DMA_CHAN_AHB_SEQ 0x014
69#define AHB_SEQ_INTR_ENB (1<<31)
70#define AHB_SEQ_BUS_WIDTH_SHIFT 28
71#define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
72#define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
73#define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
74#define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
75#define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
76#define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
77#define AHB_SEQ_DATA_SWAP (1<<27)
78#define AHB_SEQ_BURST_MASK (0x7<<24)
79#define AHB_SEQ_BURST_1 (4<<24)
80#define AHB_SEQ_BURST_4 (5<<24)
81#define AHB_SEQ_BURST_8 (6<<24)
82#define AHB_SEQ_DBL_BUF (1<<19)
83#define AHB_SEQ_WRAP_SHIFT 16
84#define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT)
85
86#define APB_DMA_CHAN_APB_PTR 0x018
87
88#define APB_DMA_CHAN_APB_SEQ 0x01c
89#define APB_SEQ_BUS_WIDTH_SHIFT 28
90#define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
91#define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT)
92#define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT)
93#define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT)
94#define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT)
95#define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT)
96#define APB_SEQ_DATA_SWAP (1<<27)
97#define APB_SEQ_WRAP_SHIFT 16
98#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)
99
100#define TEGRA_SYSTEM_DMA_CH_NR 16
101#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
102#define TEGRA_SYSTEM_DMA_CH_MIN 0
103#define TEGRA_SYSTEM_DMA_CH_MAX \
104 (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
105
106#define NV_DMA_MAX_TRASFER_SIZE 0x10000
107
108static const unsigned int ahb_addr_wrap_table[8] = {
109 0, 32, 64, 128, 256, 512, 1024, 2048
110};
111
112static const unsigned int apb_addr_wrap_table[8] = {
113 0, 1, 2, 4, 8, 16, 32, 64
114};
115
116static const unsigned int bus_width_table[5] = {
117 8, 16, 32, 64, 128
118};
119
120#define TEGRA_DMA_NAME_SIZE 16
121struct tegra_dma_channel {
122 struct list_head list;
123 int id;
124 spinlock_t lock;
125 char name[TEGRA_DMA_NAME_SIZE];
126 void __iomem *addr;
127 int mode;
128 int irq;
129 int req_transfer_count;
130};
131
132#define NV_DMA_MAX_CHANNELS 32
133
134static bool tegra_dma_initialized;
135static DEFINE_MUTEX(tegra_dma_lock);
136static DEFINE_SPINLOCK(enable_lock);
137
138static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
139static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
140
141static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
142 struct tegra_dma_req *req);
143static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
144 struct tegra_dma_req *req);
145static void tegra_dma_stop(struct tegra_dma_channel *ch);
146
147void tegra_dma_flush(struct tegra_dma_channel *ch)
148{
149}
150EXPORT_SYMBOL(tegra_dma_flush);
151
152void tegra_dma_dequeue(struct tegra_dma_channel *ch)
153{
154 struct tegra_dma_req *req;
155
156 if (tegra_dma_is_empty(ch))
157 return;
158
159 req = list_entry(ch->list.next, typeof(*req), node);
160
161 tegra_dma_dequeue_req(ch, req);
162 return;
163}
164
165static void tegra_dma_stop(struct tegra_dma_channel *ch)
166{
167 u32 csr;
168 u32 status;
169
170 csr = readl(ch->addr + APB_DMA_CHAN_CSR);
171 csr &= ~CSR_IE_EOC;
172 writel(csr, ch->addr + APB_DMA_CHAN_CSR);
173
174 csr &= ~CSR_ENB;
175 writel(csr, ch->addr + APB_DMA_CHAN_CSR);
176
177 status = readl(ch->addr + APB_DMA_CHAN_STA);
178 if (status & STA_ISE_EOC)
179 writel(status, ch->addr + APB_DMA_CHAN_STA);
180}
181
182static int tegra_dma_cancel(struct tegra_dma_channel *ch)
183{
184 unsigned long irq_flags;
185
186 spin_lock_irqsave(&ch->lock, irq_flags);
187 while (!list_empty(&ch->list))
188 list_del(ch->list.next);
189
190 tegra_dma_stop(ch);
191
192 spin_unlock_irqrestore(&ch->lock, irq_flags);
193 return 0;
194}
195
196static unsigned int get_channel_status(struct tegra_dma_channel *ch,
197 struct tegra_dma_req *req, bool is_stop_dma)
198{
199 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
200 unsigned int status;
201
202 if (is_stop_dma) {
203 /*
204 * STOP the DMA and get the transfer count.
205 * Getting the transfer count is tricky.
206 * - Globally disable DMA on all channels
207 * - Read the channel's status register to know the number
208 * of pending bytes to be transfered.
209 * - Stop the dma channel
210 * - Globally re-enable DMA to resume other transfers
211 */
212 spin_lock(&enable_lock);
213 writel(0, addr + APB_DMA_GEN);
214 udelay(20);
215 status = readl(ch->addr + APB_DMA_CHAN_STA);
216 tegra_dma_stop(ch);
217 writel(GEN_ENABLE, addr + APB_DMA_GEN);
218 spin_unlock(&enable_lock);
219 if (status & STA_ISE_EOC) {
220 pr_err("Got Dma Int here clearing");
221 writel(status, ch->addr + APB_DMA_CHAN_STA);
222 }
223 req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
224 } else {
225 status = readl(ch->addr + APB_DMA_CHAN_STA);
226 }
227 return status;
228}
229
230/* should be called with the channel lock held */
231static unsigned int dma_active_count(struct tegra_dma_channel *ch,
232 struct tegra_dma_req *req, unsigned int status)
233{
234 unsigned int to_transfer;
235 unsigned int req_transfer_count;
236 unsigned int bytes_transferred;
237
238 to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
239 req_transfer_count = ch->req_transfer_count + 1;
240 bytes_transferred = req_transfer_count;
241 if (status & STA_BUSY)
242 bytes_transferred -= to_transfer;
243 /*
244 * In continuous transfer mode, DMA only tracks the count of the
245 * half DMA buffer. So, if the DMA already finished half the DMA
246 * then add the half buffer to the completed count.
247 */
248 if (ch->mode & TEGRA_DMA_MODE_CONTINOUS) {
249 if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
250 bytes_transferred += req_transfer_count;
251 if (status & STA_ISE_EOC)
252 bytes_transferred += req_transfer_count;
253 }
254 bytes_transferred *= 4;
255 return bytes_transferred;
256}
257
258int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
259 struct tegra_dma_req *_req)
260{
261 unsigned int status;
262 struct tegra_dma_req *req = NULL;
263 int found = 0;
264 unsigned long irq_flags;
265 int stop = 0;
266
267 spin_lock_irqsave(&ch->lock, irq_flags);
268
269 if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
270 stop = 1;
271
272 list_for_each_entry(req, &ch->list, node) {
273 if (req == _req) {
274 list_del(&req->node);
275 found = 1;
276 break;
277 }
278 }
279 if (!found) {
280 spin_unlock_irqrestore(&ch->lock, irq_flags);
281 return 0;
282 }
283
284 if (!stop)
285 goto skip_stop_dma;
286
287 status = get_channel_status(ch, req, true);
288 req->bytes_transferred = dma_active_count(ch, req, status);
289
290 if (!list_empty(&ch->list)) {
291 /* if the list is not empty, queue the next request */
292 struct tegra_dma_req *next_req;
293 next_req = list_entry(ch->list.next,
294 typeof(*next_req), node);
295 tegra_dma_update_hw(ch, next_req);
296 }
297
298skip_stop_dma:
299 req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
300
301 spin_unlock_irqrestore(&ch->lock, irq_flags);
302
303 /* Callback should be called without any lock */
304 req->complete(req);
305 return 0;
306}
307EXPORT_SYMBOL(tegra_dma_dequeue_req);
308
309bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
310{
311 unsigned long irq_flags;
312 bool is_empty;
313
314 spin_lock_irqsave(&ch->lock, irq_flags);
315 if (list_empty(&ch->list))
316 is_empty = true;
317 else
318 is_empty = false;
319 spin_unlock_irqrestore(&ch->lock, irq_flags);
320 return is_empty;
321}
322EXPORT_SYMBOL(tegra_dma_is_empty);
323
324bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
325 struct tegra_dma_req *_req)
326{
327 unsigned long irq_flags;
328 struct tegra_dma_req *req;
329
330 spin_lock_irqsave(&ch->lock, irq_flags);
331 list_for_each_entry(req, &ch->list, node) {
332 if (req == _req) {
333 spin_unlock_irqrestore(&ch->lock, irq_flags);
334 return true;
335 }
336 }
337 spin_unlock_irqrestore(&ch->lock, irq_flags);
338 return false;
339}
340EXPORT_SYMBOL(tegra_dma_is_req_inflight);
341
342int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
343 struct tegra_dma_req *req)
344{
345 unsigned long irq_flags;
346 struct tegra_dma_req *_req;
347 int start_dma = 0;
348
349 if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
350 req->source_addr & 0x3 || req->dest_addr & 0x3) {
351 pr_err("Invalid DMA request for channel %d\n", ch->id);
352 return -EINVAL;
353 }
354
355 spin_lock_irqsave(&ch->lock, irq_flags);
356
357 list_for_each_entry(_req, &ch->list, node) {
358 if (req == _req) {
359 spin_unlock_irqrestore(&ch->lock, irq_flags);
360 return -EEXIST;
361 }
362 }
363
364 req->bytes_transferred = 0;
365 req->status = 0;
366 req->buffer_status = 0;
367 if (list_empty(&ch->list))
368 start_dma = 1;
369
370 list_add_tail(&req->node, &ch->list);
371
372 if (start_dma)
373 tegra_dma_update_hw(ch, req);
374
375 spin_unlock_irqrestore(&ch->lock, irq_flags);
376
377 return 0;
378}
379EXPORT_SYMBOL(tegra_dma_enqueue_req);
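
For reference, this is roughly how a client drove the legacy one-shot API, on a channel allocated with TEGRA_DMA_MODE_ONESHOT. A hedged sketch only: the request-select line, addresses, wrap and width values are illustrative placeholders, not values taken from any real board.

#include <linux/printk.h>
#include <mach/dma.h>

static void xfer_done(struct tegra_dma_req *req)
{
	pr_info("DMA done: %d bytes, status %d\n",
		req->bytes_transferred, req->status);
}

/* Queue one word-aligned transfer from a device FIFO into memory. */
static int start_oneshot(struct tegra_dma_channel *ch,
			 unsigned long fifo, unsigned long buf,
			 unsigned int size)
{
	static struct tegra_dma_req req;	/* must outlive the transfer */

	req.complete = xfer_done;
	req.to_memory = 1;			/* device FIFO -> memory */
	req.source_addr = fifo;
	req.dest_addr = buf;
	req.source_bus_width = 32;		/* placeholder widths */
	req.dest_bus_width = 32;
	req.source_wrap = 4;			/* placeholder wrap values */
	req.dest_wrap = 0;
	req.req_sel = TEGRA_DMA_REQ_SEL_OWR;	/* placeholder request line */
	req.size = size;			/* bytes, word-aligned */

	return tegra_dma_enqueue_req(ch, &req);
}
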
380
381struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
382{
383 int channel;
384 struct tegra_dma_channel *ch = NULL;
385
386 if (!tegra_dma_initialized)
387 return NULL;
388
389 mutex_lock(&tegra_dma_lock);
390
391 /* first channel is the shared channel */
392 if (mode & TEGRA_DMA_SHARED) {
393 channel = TEGRA_SYSTEM_DMA_CH_MIN;
394 } else {
395 channel = find_first_zero_bit(channel_usage,
396 ARRAY_SIZE(dma_channels));
397 if (channel >= ARRAY_SIZE(dma_channels))
398 goto out;
399 }
400 __set_bit(channel, channel_usage);
401 ch = &dma_channels[channel];
402 ch->mode = mode;
403
404out:
405 mutex_unlock(&tegra_dma_lock);
406 return ch;
407}
408EXPORT_SYMBOL(tegra_dma_allocate_channel);
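
The allocator is a plain bitmap search under tegra_dma_lock: channel TEGRA_SYSTEM_DMA_CH_MIN stays permanently marked and is handed to every TEGRA_DMA_SHARED caller, while exclusive callers take the first clear bit. A single-threaded userspace model of that pattern (the real driver serializes with the mutex):

#include <stdio.h>

#define NCHAN 32
static unsigned long usage;		/* one bit per channel */

/* First clear bit, like find_first_zero_bit(); -1 if all busy. */
static int alloc_channel(void)
{
	for (int i = 0; i < NCHAN; i++) {
		if (!(usage & (1ul << i))) {
			usage |= 1ul << i;
			return i;
		}
	}
	return -1;
}

static void free_channel(int i)
{
	usage &= ~(1ul << i);
}

int main(void)
{
	usage |= 1ul;			/* channel 0: the shared one */
	int a = alloc_channel();	/* 1 */
	int b = alloc_channel();	/* 2 */
	free_channel(a);
	printf("%d %d %d\n", a, b, alloc_channel());	/* 1 2 1 */
	return 0;
}
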
409
410void tegra_dma_free_channel(struct tegra_dma_channel *ch)
411{
412 if (ch->mode & TEGRA_DMA_SHARED)
413 return;
414 tegra_dma_cancel(ch);
415 mutex_lock(&tegra_dma_lock);
416 __clear_bit(ch->id, channel_usage);
417 mutex_unlock(&tegra_dma_lock);
418}
419EXPORT_SYMBOL(tegra_dma_free_channel);
420
421static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
422 struct tegra_dma_req *req)
423{
424 u32 apb_ptr;
425 u32 ahb_ptr;
426
427 if (req->to_memory) {
428 apb_ptr = req->source_addr;
429 ahb_ptr = req->dest_addr;
430 } else {
431 apb_ptr = req->dest_addr;
432 ahb_ptr = req->source_addr;
433 }
434 writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
435 writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
436
437 req->status = TEGRA_DMA_REQ_INFLIGHT;
438 return;
439}
440
441static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
442 struct tegra_dma_req *req)
443{
444 int ahb_addr_wrap;
445 int apb_addr_wrap;
446 int ahb_bus_width;
447 int apb_bus_width;
448 int index;
449
450 u32 ahb_seq;
451 u32 apb_seq;
452 u32 ahb_ptr;
453 u32 apb_ptr;
454 u32 csr;
455
456 csr = CSR_IE_EOC | CSR_FLOW;
457 ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
458 apb_seq = 0;
459
460 csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
461
462	/* One-shot mode is always single buffered;
463	 * continuous mode is always double buffered.
464	 */
465 if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
466 csr |= CSR_ONCE;
467 ch->req_transfer_count = (req->size >> 2) - 1;
468 } else {
469 ahb_seq |= AHB_SEQ_DBL_BUF;
470
471 /* In double buffered mode, we set the size to half the
472 * requested size and interrupt when half the buffer
473 * is full */
474 ch->req_transfer_count = (req->size >> 3) - 1;
475 }
476
477 csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
478
479 if (req->to_memory) {
480 apb_ptr = req->source_addr;
481 ahb_ptr = req->dest_addr;
482
483 apb_addr_wrap = req->source_wrap;
484 ahb_addr_wrap = req->dest_wrap;
485 apb_bus_width = req->source_bus_width;
486 ahb_bus_width = req->dest_bus_width;
487
488 } else {
489 csr |= CSR_DIR;
490 apb_ptr = req->dest_addr;
491 ahb_ptr = req->source_addr;
492
493 apb_addr_wrap = req->dest_wrap;
494 ahb_addr_wrap = req->source_wrap;
495 apb_bus_width = req->dest_bus_width;
496 ahb_bus_width = req->source_bus_width;
497 }
498
499 apb_addr_wrap >>= 2;
500 ahb_addr_wrap >>= 2;
501
502 /* set address wrap for APB size */
503 index = 0;
504 do {
505 if (apb_addr_wrap_table[index] == apb_addr_wrap)
506 break;
507 index++;
508 } while (index < ARRAY_SIZE(apb_addr_wrap_table));
509 BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
510 apb_seq |= index << APB_SEQ_WRAP_SHIFT;
511
512 /* set address wrap for AHB size */
513 index = 0;
514 do {
515 if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
516 break;
517 index++;
518 } while (index < ARRAY_SIZE(ahb_addr_wrap_table));
519 BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
520 ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
521
522 for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
523 if (bus_width_table[index] == ahb_bus_width)
524 break;
525 }
526 BUG_ON(index == ARRAY_SIZE(bus_width_table));
527 ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
528
529 for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
530 if (bus_width_table[index] == apb_bus_width)
531 break;
532 }
533 BUG_ON(index == ARRAY_SIZE(bus_width_table));
534 apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
535
536 writel(csr, ch->addr + APB_DMA_CHAN_CSR);
537 writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
538 writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
539 writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
540 writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
541
542 csr |= CSR_ENB;
543 writel(csr, ch->addr + APB_DMA_CHAN_CSR);
544
545 req->status = TEGRA_DMA_REQ_INFLIGHT;
546}
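
The word count is the crux of the setup above: WCOUNT is programmed in 32-bit words minus one, and continuous mode programs only half the buffer so the EOC interrupt fires at the midpoint. A standalone check of that arithmetic (CSR_WCOUNT_SHIFT is an assumed value; the real definition is outside this hunk):

#include <stdio.h>

#define CSR_WCOUNT_SHIFT 2	/* assumed; defined elsewhere in the driver */

/* WCOUNT in words-minus-one; continuous mode is double buffered. */
static unsigned int wcount(unsigned int size_bytes, int oneshot)
{
	return oneshot ? (size_bytes >> 2) - 1 : (size_bytes >> 3) - 1;
}

int main(void)
{
	printf("one-shot 4096B:   WCOUNT %u\n", wcount(4096, 1));	/* 1023 */
	printf("continuous 4096B: WCOUNT %u\n", wcount(4096, 0));	/* 511 */
	printf("CSR field:        0x%x\n",
	       wcount(4096, 1) << CSR_WCOUNT_SHIFT);
	return 0;
}
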
547
548static void handle_oneshot_dma(struct tegra_dma_channel *ch)
549{
550 struct tegra_dma_req *req;
551 unsigned long irq_flags;
552
553 spin_lock_irqsave(&ch->lock, irq_flags);
554 if (list_empty(&ch->list)) {
555 spin_unlock_irqrestore(&ch->lock, irq_flags);
556 return;
557 }
558
559 req = list_entry(ch->list.next, typeof(*req), node);
560 if (req) {
561 int bytes_transferred;
562
563 bytes_transferred = ch->req_transfer_count;
564 bytes_transferred += 1;
565 bytes_transferred <<= 2;
566
567 list_del(&req->node);
568 req->bytes_transferred = bytes_transferred;
569 req->status = TEGRA_DMA_REQ_SUCCESS;
570
571 spin_unlock_irqrestore(&ch->lock, irq_flags);
572 /* Callback should be called without any lock */
573 pr_debug("%s: transferred %d bytes\n", __func__,
574 req->bytes_transferred);
575 req->complete(req);
576 spin_lock_irqsave(&ch->lock, irq_flags);
577 }
578
579 if (!list_empty(&ch->list)) {
580 req = list_entry(ch->list.next, typeof(*req), node);
581		/* the complete function we just called may have enqueued
582		 * another req, in which case the DMA has already started */
583 if (req->status != TEGRA_DMA_REQ_INFLIGHT)
584 tegra_dma_update_hw(ch, req);
585 }
586 spin_unlock_irqrestore(&ch->lock, irq_flags);
587}
588
589static void handle_continuous_dma(struct tegra_dma_channel *ch)
590{
591 struct tegra_dma_req *req;
592 unsigned long irq_flags;
593
594 spin_lock_irqsave(&ch->lock, irq_flags);
595 if (list_empty(&ch->list)) {
596 spin_unlock_irqrestore(&ch->lock, irq_flags);
597 return;
598 }
599
600 req = list_entry(ch->list.next, typeof(*req), node);
601 if (req) {
602 if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
603 bool is_dma_ping_complete;
604 is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
605 & STA_PING_PONG) ? true : false;
606 if (req->to_memory)
607 is_dma_ping_complete = !is_dma_ping_complete;
608 /* Out of sync - Release current buffer */
609 if (!is_dma_ping_complete) {
610 int bytes_transferred;
611
612 bytes_transferred = ch->req_transfer_count;
613 bytes_transferred += 1;
614 bytes_transferred <<= 3;
615 req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
616 req->bytes_transferred = bytes_transferred;
617 req->status = TEGRA_DMA_REQ_SUCCESS;
618 tegra_dma_stop(ch);
619
620 if (!list_is_last(&req->node, &ch->list)) {
621 struct tegra_dma_req *next_req;
622
623 next_req = list_entry(req->node.next,
624 typeof(*next_req), node);
625 tegra_dma_update_hw(ch, next_req);
626 }
627
628 list_del(&req->node);
629
630			/* DMA lock is NOT held when callback is called */
631 spin_unlock_irqrestore(&ch->lock, irq_flags);
632 req->complete(req);
633 return;
634 }
635			/* Load the next request into the hardware,
636			 * if available */
637 if (!list_is_last(&req->node, &ch->list)) {
638 struct tegra_dma_req *next_req;
639
640 next_req = list_entry(req->node.next,
641 typeof(*next_req), node);
642 tegra_dma_update_hw_partial(ch, next_req);
643 }
644 req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
645 req->status = TEGRA_DMA_REQ_SUCCESS;
646 /* DMA lock is NOT held when callback is called */
647 spin_unlock_irqrestore(&ch->lock, irq_flags);
648 if (likely(req->threshold))
649 req->threshold(req);
650 return;
651
652 } else if (req->buffer_status ==
653 TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
654			/* Callback when the buffer is completely full
655			 * (i.e., on the second interrupt) */
656 int bytes_transferred;
657
658 bytes_transferred = ch->req_transfer_count;
659 bytes_transferred += 1;
660 bytes_transferred <<= 3;
661
662 req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
663 req->bytes_transferred = bytes_transferred;
664 req->status = TEGRA_DMA_REQ_SUCCESS;
665 list_del(&req->node);
666
667			/* DMA lock is NOT held when callback is called */
668 spin_unlock_irqrestore(&ch->lock, irq_flags);
669 req->complete(req);
670 return;
671
672 } else {
673 BUG();
674 }
675 }
676 spin_unlock_irqrestore(&ch->lock, irq_flags);
677}
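
Stripped of the locking and requeue details, the continuous-mode handler above is a per-request two-state machine: EMPTY goes to HALF_FULL on the first EOC interrupt (the threshold callback), HALF_FULL goes to FULL on the second (the complete callback), and a third interrupt is a bug. A compact standalone model:

#include <stdio.h>

enum buf_status { BUF_EMPTY, BUF_HALF_FULL, BUF_FULL };

/* One EOC interrupt advances the request by half a buffer. */
static enum buf_status on_eoc(enum buf_status st, unsigned int half,
			      unsigned int *bytes)
{
	switch (st) {
	case BUF_EMPTY:		/* threshold(); bytes recorded at completion */
		return BUF_HALF_FULL;
	case BUF_HALF_FULL:	/* whole buffer drained: complete() */
		*bytes = 2 * half;
		return BUF_FULL;
	default:		/* a third EOC: the driver BUG()s */
		return BUF_FULL;
	}
}

int main(void)
{
	unsigned int bytes = 0;
	enum buf_status st = BUF_EMPTY;

	st = on_eoc(st, 2048, &bytes);	/* -> HALF_FULL */
	st = on_eoc(st, 2048, &bytes);	/* -> FULL, 4096 bytes */
	printf("state %d, %u bytes\n", st, bytes);
	return 0;
}
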
678
679static irqreturn_t dma_isr(int irq, void *data)
680{
681 struct tegra_dma_channel *ch = data;
682 unsigned long status;
683
684 status = readl(ch->addr + APB_DMA_CHAN_STA);
685 if (status & STA_ISE_EOC)
686 writel(status, ch->addr + APB_DMA_CHAN_STA);
687 else {
688 pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
689 return IRQ_HANDLED;
690 }
691 return IRQ_WAKE_THREAD;
692}
693
694static irqreturn_t dma_thread_fn(int irq, void *data)
695{
696 struct tegra_dma_channel *ch = data;
697
698 if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
699 handle_oneshot_dma(ch);
700 else
701 handle_continuous_dma(ch);
702
703
704 return IRQ_HANDLED;
705}
706
707int __init tegra_dma_init(void)
708{
709 int ret = 0;
710 int i;
711 unsigned int irq;
712 void __iomem *addr;
713 struct clk *c;
714
715 bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);
716
717 c = clk_get_sys("tegra-apbdma", NULL);
718 if (IS_ERR(c)) {
719 pr_err("Unable to get clock for APB DMA\n");
720 ret = PTR_ERR(c);
721 goto fail;
722 }
723 ret = clk_prepare_enable(c);
724 if (ret != 0) {
725 pr_err("Unable to enable clock for APB DMA\n");
726 goto fail;
727 }
728
729 addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
730 writel(GEN_ENABLE, addr + APB_DMA_GEN);
731 writel(0, addr + APB_DMA_CNTRL);
732 writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
733 addr + APB_DMA_IRQ_MASK_SET);
734
735 for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
736 struct tegra_dma_channel *ch = &dma_channels[i];
737
738 ch->id = i;
739 snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
740
741 ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
742 TEGRA_APB_DMA_CH0_SIZE * i);
743
744 spin_lock_init(&ch->lock);
745 INIT_LIST_HEAD(&ch->list);
746
747 irq = INT_APB_DMA_CH0 + i;
748 ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
749 dma_channels[i].name, ch);
750 if (ret) {
751 pr_err("Failed to register IRQ %d for DMA %d\n",
752 irq, i);
753 goto fail;
754 }
755 ch->irq = irq;
756
757 __clear_bit(i, channel_usage);
758 }
759 /* mark the shared channel allocated */
760 __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
761
762 tegra_dma_initialized = true;
763
764 return 0;
765fail:
766 writel(0, addr + APB_DMA_GEN);
767 for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
768 struct tegra_dma_channel *ch = &dma_channels[i];
769 if (ch->irq)
770 free_irq(ch->irq, ch);
771 }
772 return ret;
773}
774postcore_initcall(tegra_dma_init);
775
776#ifdef CONFIG_PM
777static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
778
779void tegra_dma_suspend(void)
780{
781 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
782 u32 *ctx = apb_dma;
783 int i;
784
785 *ctx++ = readl(addr + APB_DMA_GEN);
786 *ctx++ = readl(addr + APB_DMA_CNTRL);
787 *ctx++ = readl(addr + APB_DMA_IRQ_MASK);
788
789 for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
790 addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
791 TEGRA_APB_DMA_CH0_SIZE * i);
792
793 *ctx++ = readl(addr + APB_DMA_CHAN_CSR);
794 *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
795 *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
796 *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
797 *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
798 }
799}
800
801void tegra_dma_resume(void)
802{
803 void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
804 u32 *ctx = apb_dma;
805 int i;
806
807 writel(*ctx++, addr + APB_DMA_GEN);
808 writel(*ctx++, addr + APB_DMA_CNTRL);
809 writel(*ctx++, addr + APB_DMA_IRQ_MASK);
810
811 for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
812 addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
813 TEGRA_APB_DMA_CH0_SIZE * i);
814
815 writel(*ctx++, addr + APB_DMA_CHAN_CSR);
816 writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
817 writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
818 writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
819 writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
820 }
821}
822
823#endif
diff --git a/arch/arm/mach-tegra/include/mach/dma.h b/arch/arm/mach-tegra/include/mach/dma.h
index 9077092812c0..3081cc6dda3b 100644
--- a/arch/arm/mach-tegra/include/mach/dma.h
+++ b/arch/arm/mach-tegra/include/mach/dma.h
@@ -51,101 +51,4 @@
51#define TEGRA_DMA_REQ_SEL_OWR 25 51#define TEGRA_DMA_REQ_SEL_OWR 25
52#define TEGRA_DMA_REQ_SEL_INVALID 31 52#define TEGRA_DMA_REQ_SEL_INVALID 31
53 53
54struct tegra_dma_req;
55struct tegra_dma_channel;
56
57enum tegra_dma_mode {
58 TEGRA_DMA_SHARED = 1,
59 TEGRA_DMA_MODE_CONTINOUS = 2,
60 TEGRA_DMA_MODE_ONESHOT = 4,
61};
62
63enum tegra_dma_req_error {
64 TEGRA_DMA_REQ_SUCCESS = 0,
65 TEGRA_DMA_REQ_ERROR_ABORTED,
66 TEGRA_DMA_REQ_INFLIGHT,
67};
68
69enum tegra_dma_req_buff_status {
70 TEGRA_DMA_REQ_BUF_STATUS_EMPTY = 0,
71 TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL,
72 TEGRA_DMA_REQ_BUF_STATUS_FULL,
73};
74
75struct tegra_dma_req {
76 struct list_head node;
77 unsigned int modid;
78 int instance;
79
80 /* Called when the req is complete and from the DMA ISR context.
81 * When this is called the req structure is no longer queued by
82 * the DMA channel.
83 *
84	 * What the DMA does next depends on how many reqs it holds. If no
85	 * more requests are queued, it will STOP the DMA. If there are
86	 * further requests queued, it will start the next one.
87 */
88 void (*complete)(struct tegra_dma_req *req);
89
90	/* This is called from the DMA ISR context when the DMA is still in
91	 * progress and is actively filling the same buffer.
92 *
93 * In case of continuous mode receive, this threshold is 1/2 the buffer
94 * size. In other cases, this will not even be called as there is no
95 * hardware support for it.
96 *
97	 * In the case of continuous mode receive, if there is a next req already
98 * queued, DMA programs the HW to use that req when this req is
99 * completed. If there is no "next req" queued, then DMA ISR doesn't do
100 * anything before calling this callback.
101 *
102	 * This is mainly used in cases where the client has queued
103	 * only one req and wants some sort of DMA threshold
104	 * callback to program the next buffer.
105 *
106 */
107 void (*threshold)(struct tegra_dma_req *req);
108
109 /* 1 to copy to memory.
110	 * 0 to copy from memory to the device FIFO */
111 int to_memory;
112
113 void *virt_addr;
114
115 unsigned long source_addr;
116 unsigned long dest_addr;
117 unsigned long dest_wrap;
118 unsigned long source_wrap;
119 unsigned long source_bus_width;
120 unsigned long dest_bus_width;
121 unsigned long req_sel;
122 unsigned int size;
123
124	/* Updated by the DMA driver on the completion of the request. */
125 int bytes_transferred;
126 int status;
127
128 /* DMA completion tracking information */
129 int buffer_status;
130
131 /* Client specific data */
132 void *dev;
133};
134
135int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
136 struct tegra_dma_req *req);
137int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
138 struct tegra_dma_req *req);
139void tegra_dma_dequeue(struct tegra_dma_channel *ch);
140void tegra_dma_flush(struct tegra_dma_channel *ch);
141
142bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
143 struct tegra_dma_req *req);
144bool tegra_dma_is_empty(struct tegra_dma_channel *ch);
145
146struct tegra_dma_channel *tegra_dma_allocate_channel(int mode);
147void tegra_dma_free_channel(struct tegra_dma_channel *ch);
148
149int __init tegra_dma_init(void);
150
151#endif 54#endif
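
Taken together with the callbacks documented above, a continuous-mode client hangs its logic off threshold() and complete(); a hedged sketch, on a channel allocated with TEGRA_DMA_MODE_CONTINOUS, with everything else filled as for a one-shot request:

#include <mach/dma.h>

static void rx_half(struct tegra_dma_req *req)
{
	/* threshold(): first half of the buffer is stable, consume it */
}

static void rx_full(struct tegra_dma_req *req)
{
	/* complete(): req has been unlinked, queue the next buffer here */
}

static int start_rx(struct tegra_dma_channel *ch, struct tegra_dma_req *req)
{
	req->threshold = rx_half;	/* fires at the half-buffer mark */
	req->complete = rx_full;	/* fires when the buffer is full */
	req->to_memory = 1;
	/* addresses, widths, wraps and req_sel set as for a one-shot req */
	return tegra_dma_enqueue_req(ch, req);
}
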
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52ab93e..4e07eec1270d 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
63 pid = task_pid_nr(thread->task) << ASID_BITS; 63 pid = task_pid_nr(thread->task) << ASID_BITS;
64 asm volatile( 64 asm volatile(
65 " mrc p15, 0, %0, c13, c0, 1\n" 65 " mrc p15, 0, %0, c13, c0, 1\n"
66 " bfi %1, %0, #0, %2\n" 66 " and %0, %0, %2\n"
67 " mcr p15, 0, %1, c13, c0, 1\n" 67 " orr %0, %0, %1\n"
68 " mcr p15, 0, %0, c13, c0, 1\n"
68 : "=r" (contextidr), "+r" (pid) 69 : "=r" (contextidr), "+r" (pid)
69 : "I" (ASID_BITS)); 70 : "I" (~ASID_MASK));
70 isb(); 71 isb();
71 72
72 return NOTIFY_OK; 73 return NOTIFY_OK;
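
The rewritten sequence makes the merge explicit: keep CONTEXTIDR's low ASID bits, OR in the pre-shifted PID above them, and write the merged value back. A userspace illustration, assuming ASID_MASK covers the bits above ASID_BITS, which is what the "I" (~ASID_MASK) operand implies:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS 8
#define ASID_MASK (~0u << ASID_BITS)	/* assumed: everything above the ASID */

int main(void)
{
	uint32_t contextidr = 0xabcdef42;	/* ASID = 0x42 */
	uint32_t pid = 0x1234u << ASID_BITS;	/* pre-shifted PID */

	/* and %0, %0, ~ASID_MASK ; orr %0, %0, %1 */
	uint32_t merged = (contextidr & ~ASID_MASK) | pid;

	assert((merged & ~ASID_MASK) == 0x42);	/* ASID preserved */
	assert((merged & ASID_MASK) == pid);	/* PID spliced in */
	printf("0x%08x\n", merged);
	return 0;
}
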
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 051204fc4617..e59c4ab71bcb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -489,7 +489,7 @@ static bool __in_atomic_pool(void *start, size_t size)
489 void *pool_start = pool->vaddr; 489 void *pool_start = pool->vaddr;
490 void *pool_end = pool->vaddr + pool->size; 490 void *pool_end = pool->vaddr + pool->size;
491 491
492 if (start < pool_start || start > pool_end) 492 if (start < pool_start || start >= pool_end)
493 return false; 493 return false;
494 494
495 if (end <= pool_end) 495 if (end <= pool_end)
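
The one-character change above turns the start-pointer test into a proper half-open interval check: a pointer equal to pool_end is already one past the pool, so the old `>` let it through. In miniature:

#include <stdio.h>

/* Half-open interval [start, end): 'end' itself lies outside the pool. */
static int in_pool(const char *p, const char *start, const char *end)
{
	return p >= start && p < end;
}

int main(void)
{
	char pool[64];

	printf("%d\n", in_pool(pool, pool, pool + 64));		/* 1 */
	printf("%d\n", in_pool(pool + 64, pool, pool + 64));	/* 0; old test: 1 */
	return 0;
}
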
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a7a9e41fa2c2..18144e6a3115 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -990,8 +990,8 @@ void __init sanity_check_meminfo(void)
990 * Check whether this memory bank would partially overlap 990 * Check whether this memory bank would partially overlap
991 * the vmalloc area. 991 * the vmalloc area.
992 */ 992 */
993 if (__va(bank->start + bank->size) > vmalloc_min || 993 if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
994 __va(bank->start + bank->size) < __va(bank->start)) { 994 __va(bank->start + bank->size - 1) <= __va(bank->start)) {
995 unsigned long newsize = vmalloc_min - __va(bank->start); 995 unsigned long newsize = vmalloc_min - __va(bank->start);
996 printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " 996 printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
997 "to -%.8llx (vmalloc region overlap).\n", 997 "to -%.8llx (vmalloc region overlap).\n",
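
Both substitutions in this hunk dodge address wraparound: for a bank that ends exactly at the top of the address space, `bank->start + bank->size` wraps to a small value and the old comparison misfires, while the address of the bank's last byte stays representable. A 32-bit illustration of the underlying arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start = 0xf0000000u, size = 0x10000000u;	/* ends at 2^32 */

	uint32_t end  = start + size;		/* wraps to 0x00000000 */
	uint32_t last = start + size - 1;	/* 0xffffffff, no wrap */

	printf("end  = 0x%08x (wrapped: %d)\n", end, end < start);
	printf("last = 0x%08x (wrapped: %d)\n", last, last < start);
	return 0;
}
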
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index d861aa73299d..28acb383e7df 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -67,6 +67,7 @@
67 67
68static unsigned long omap_sram_start; 68static unsigned long omap_sram_start;
69static void __iomem *omap_sram_base; 69static void __iomem *omap_sram_base;
70static unsigned long omap_sram_skip;
70static unsigned long omap_sram_size; 71static unsigned long omap_sram_size;
71static void __iomem *omap_sram_ceil; 72static void __iomem *omap_sram_ceil;
72 73
@@ -105,6 +106,7 @@ static int is_sram_locked(void)
105 */ 106 */
106static void __init omap_detect_sram(void) 107static void __init omap_detect_sram(void)
107{ 108{
109 omap_sram_skip = SRAM_BOOTLOADER_SZ;
108 if (cpu_class_is_omap2()) { 110 if (cpu_class_is_omap2()) {
109 if (is_sram_locked()) { 111 if (is_sram_locked()) {
110 if (cpu_is_omap34xx()) { 112 if (cpu_is_omap34xx()) {
@@ -112,6 +114,7 @@ static void __init omap_detect_sram(void)
112 if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) || 114 if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
113 (omap_type() == OMAP2_DEVICE_TYPE_SEC)) { 115 (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
114 omap_sram_size = 0x7000; /* 28K */ 116 omap_sram_size = 0x7000; /* 28K */
117 omap_sram_skip += SZ_16K;
115 } else { 118 } else {
116 omap_sram_size = 0x8000; /* 32K */ 119 omap_sram_size = 0x8000; /* 32K */
117 } 120 }
@@ -174,8 +177,10 @@ static void __init omap_map_sram(void)
174 return; 177 return;
175 178
176#ifdef CONFIG_OMAP4_ERRATA_I688 179#ifdef CONFIG_OMAP4_ERRATA_I688
180 if (cpu_is_omap44xx()) {
177 omap_sram_start += PAGE_SIZE; 181 omap_sram_start += PAGE_SIZE;
178 omap_sram_size -= SZ_16K; 182 omap_sram_size -= SZ_16K;
183 }
179#endif 184#endif
180 if (cpu_is_omap34xx()) { 185 if (cpu_is_omap34xx()) {
181 /* 186 /*
@@ -202,8 +207,8 @@ static void __init omap_map_sram(void)
202 * Looks like we need to preserve some bootloader code at the 207 * Looks like we need to preserve some bootloader code at the
203 * beginning of SRAM for jumping to flash for reboot to work... 208 * beginning of SRAM for jumping to flash for reboot to work...
204 */ 209 */
205 memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0, 210 memset_io(omap_sram_base + omap_sram_skip, 0,
206 omap_sram_size - SRAM_BOOTLOADER_SZ); 211 omap_sram_size - omap_sram_skip);
207} 212}
208 213
209/* 214/*
@@ -217,7 +222,7 @@ void *omap_sram_push_address(unsigned long size)
217{ 222{
218 unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; 223 unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
219 224
220 available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ); 225 available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);
221 226
222 if (size > available) { 227 if (size > available) {
223 pr_err("Not enough space in SRAM\n"); 228 pr_err("Not enough space in SRAM\n");
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f34861920634..c7092e6057c5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -38,6 +38,7 @@ config BLACKFIN
38 select GENERIC_ATOMIC64 38 select GENERIC_ATOMIC64
39 select GENERIC_IRQ_PROBE 39 select GENERIC_IRQ_PROBE
40 select IRQ_PER_CPU if SMP 40 select IRQ_PER_CPU if SMP
41 select USE_GENERIC_SMP_HELPERS if SMP
41 select HAVE_NMI_WATCHDOG if NMI_WATCHDOG 42 select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
42 select GENERIC_SMP_IDLE_THREAD 43 select GENERIC_SMP_IDLE_THREAD
43 select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS 44 select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d3d7e64ca96d..66cf00095b84 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -20,7 +20,6 @@ endif
20KBUILD_AFLAGS += $(call cc-option,-mno-fdpic) 20KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
21KBUILD_CFLAGS_MODULE += -mlong-calls 21KBUILD_CFLAGS_MODULE += -mlong-calls
22LDFLAGS += -m elf32bfin 22LDFLAGS += -m elf32bfin
23KALLSYMS += --symbol-prefix=_
24 23
25KBUILD_DEFCONFIG := BF537-STAMP_defconfig 24KBUILD_DEFCONFIG := BF537-STAMP_defconfig
26 25
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index dc3d144b4bb5..9631598dcc5d 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -18,6 +18,8 @@
18#define raw_smp_processor_id() blackfin_core_id() 18#define raw_smp_processor_id() blackfin_core_id()
19 19
20extern void bfin_relocate_coreb_l1_mem(void); 20extern void bfin_relocate_coreb_l1_mem(void);
21extern void arch_send_call_function_single_ipi(int cpu);
22extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
21 23
22#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1) 24#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
23asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr); 25asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 00bbe672b3b3..a40151306b77 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
48 48
49struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; 49struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
50 50
51#define BFIN_IPI_TIMER 0 51enum ipi_message_type {
52#define BFIN_IPI_RESCHEDULE 1 52 BFIN_IPI_TIMER,
53#define BFIN_IPI_CALL_FUNC 2 53 BFIN_IPI_RESCHEDULE,
54#define BFIN_IPI_CPU_STOP 3 54 BFIN_IPI_CALL_FUNC,
55 BFIN_IPI_CALL_FUNC_SINGLE,
56 BFIN_IPI_CPU_STOP,
57};
55 58
56struct blackfin_flush_data { 59struct blackfin_flush_data {
57 unsigned long start; 60 unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
60 63
61void *secondary_stack; 64void *secondary_stack;
62 65
63
64struct smp_call_struct {
65 void (*func)(void *info);
66 void *info;
67 int wait;
68 cpumask_t *waitmask;
69};
70
71static struct blackfin_flush_data smp_flush_data; 66static struct blackfin_flush_data smp_flush_data;
72 67
73static DEFINE_SPINLOCK(stop_lock); 68static DEFINE_SPINLOCK(stop_lock);
74 69
75struct ipi_message {
76 unsigned long type;
77 struct smp_call_struct call_struct;
78};
79
80/* A magic number - stress test shows this is safe for common cases */ 70/* A magic number - stress test shows this is safe for common cases */
81#define BFIN_IPI_MSGQ_LEN 5 71#define BFIN_IPI_MSGQ_LEN 5
82 72
83/* Simple FIFO buffer, overflow leads to panic */ 73/* Simple FIFO buffer, overflow leads to panic */
84struct ipi_message_queue { 74struct ipi_data {
85 spinlock_t lock;
86 unsigned long count; 75 unsigned long count;
87 unsigned long head; /* head of the queue */ 76 unsigned long bits;
88 struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
89}; 77};
90 78
91static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue); 79static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
92 80
93static void ipi_cpu_stop(unsigned int cpu) 81static void ipi_cpu_stop(unsigned int cpu)
94{ 82{
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
129 blackfin_icache_flush_range(fdata->start, fdata->end); 117 blackfin_icache_flush_range(fdata->start, fdata->end);
130} 118}
131 119
132static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
133{
134 int wait;
135 void (*func)(void *info);
136 void *info;
137 func = msg->call_struct.func;
138 info = msg->call_struct.info;
139 wait = msg->call_struct.wait;
140 func(info);
141 if (wait) {
142#ifdef __ARCH_SYNC_CORE_DCACHE
143 /*
144 * 'wait' usually means synchronization between CPUs.
145 * Invalidate D cache in case shared data was changed
146 * by func() to ensure cache coherence.
147 */
148 resync_core_dcache();
149#endif
150 cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
151 }
152}
153
154/* Use IRQ_SUPPLE_0 to request reschedule. 120/* Use IRQ_SUPPLE_0 to request reschedule.
155 * When returning from interrupt to user space, 121 * When returning from interrupt to user space,
156 * there is chance to reschedule */ 122 * there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
172 138
173static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) 139static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
174{ 140{
175 struct ipi_message *msg; 141 struct ipi_data *bfin_ipi_data;
176 struct ipi_message_queue *msg_queue;
177 unsigned int cpu = smp_processor_id(); 142 unsigned int cpu = smp_processor_id();
178 unsigned long flags; 143 unsigned long pending;
144 unsigned long msg;
179 145
180 platform_clear_ipi(cpu, IRQ_SUPPLE_1); 146 platform_clear_ipi(cpu, IRQ_SUPPLE_1);
181 147
182 msg_queue = &__get_cpu_var(ipi_msg_queue); 148 bfin_ipi_data = &__get_cpu_var(bfin_ipi);
183 149
184 spin_lock_irqsave(&msg_queue->lock, flags); 150 while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
185 151 msg = 0;
186 while (msg_queue->count) { 152 do {
187 msg = &msg_queue->ipi_message[msg_queue->head]; 153 msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
188 switch (msg->type) { 154 switch (msg) {
189 case BFIN_IPI_TIMER: 155 case BFIN_IPI_TIMER:
190 ipi_timer(); 156 ipi_timer();
191 break; 157 break;
192 case BFIN_IPI_RESCHEDULE: 158 case BFIN_IPI_RESCHEDULE:
193 scheduler_ipi(); 159 scheduler_ipi();
194 break; 160 break;
195 case BFIN_IPI_CALL_FUNC: 161 case BFIN_IPI_CALL_FUNC:
196 ipi_call_function(cpu, msg); 162 generic_smp_call_function_interrupt();
197 break; 163 break;
198 case BFIN_IPI_CPU_STOP: 164
199 ipi_cpu_stop(cpu); 165 case BFIN_IPI_CALL_FUNC_SINGLE:
200 break; 166 generic_smp_call_function_single_interrupt();
201 default: 167 break;
202 printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n", 168
203 cpu, msg->type); 169 case BFIN_IPI_CPU_STOP:
204 break; 170 ipi_cpu_stop(cpu);
205 } 171 break;
206 msg_queue->head++; 172 }
207 msg_queue->head %= BFIN_IPI_MSGQ_LEN; 173 } while (msg < BITS_PER_LONG);
208 msg_queue->count--; 174
175 smp_mb();
209 } 176 }
210 spin_unlock_irqrestore(&msg_queue->lock, flags);
211 return IRQ_HANDLED; 177 return IRQ_HANDLED;
212} 178}
213 179
214static void ipi_queue_init(void) 180static void bfin_ipi_init(void)
215{ 181{
216 unsigned int cpu; 182 unsigned int cpu;
217 struct ipi_message_queue *msg_queue; 183 struct ipi_data *bfin_ipi_data;
218 for_each_possible_cpu(cpu) { 184 for_each_possible_cpu(cpu) {
219 msg_queue = &per_cpu(ipi_msg_queue, cpu); 185 bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
220 spin_lock_init(&msg_queue->lock); 186 bfin_ipi_data->bits = 0;
221 msg_queue->count = 0; 187 bfin_ipi_data->count = 0;
222 msg_queue->head = 0;
223 } 188 }
224} 189}
225 190
226static inline void smp_send_message(cpumask_t callmap, unsigned long type, 191void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
227 void (*func) (void *info), void *info, int wait)
228{ 192{
229 unsigned int cpu; 193 unsigned int cpu;
230 struct ipi_message_queue *msg_queue; 194 struct ipi_data *bfin_ipi_data;
231 struct ipi_message *msg; 195 unsigned long flags;
232 unsigned long flags, next_msg; 196
233 cpumask_t waitmask; /* waitmask is shared by all cpus */ 197 local_irq_save(flags);
234 198
235 cpumask_copy(&waitmask, &callmap); 199 for_each_cpu(cpu, cpumask) {
236 for_each_cpu(cpu, &callmap) { 200 bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
237 msg_queue = &per_cpu(ipi_msg_queue, cpu); 201 smp_mb();
238 spin_lock_irqsave(&msg_queue->lock, flags); 202 set_bit(msg, &bfin_ipi_data->bits);
239 if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { 203 bfin_ipi_data->count++;
240 next_msg = (msg_queue->head + msg_queue->count)
241 % BFIN_IPI_MSGQ_LEN;
242 msg = &msg_queue->ipi_message[next_msg];
243 msg->type = type;
244 if (type == BFIN_IPI_CALL_FUNC) {
245 msg->call_struct.func = func;
246 msg->call_struct.info = info;
247 msg->call_struct.wait = wait;
248 msg->call_struct.waitmask = &waitmask;
249 }
250 msg_queue->count++;
251 } else
252 panic("IPI message queue overflow\n");
253 spin_unlock_irqrestore(&msg_queue->lock, flags);
254 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1); 204 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
255 } 205 }
256 206
257 if (wait) { 207 local_irq_restore(flags);
258 while (!cpumask_empty(&waitmask))
259 blackfin_dcache_invalidate_range(
260 (unsigned long)(&waitmask),
261 (unsigned long)(&waitmask));
262#ifdef __ARCH_SYNC_CORE_DCACHE
263 /*
264 * Invalidate D cache in case shared data was changed by
265 * other processors to ensure cache coherence.
266 */
267 resync_core_dcache();
268#endif
269 }
270} 208}
271 209
272int smp_call_function(void (*func)(void *info), void *info, int wait) 210void arch_send_call_function_single_ipi(int cpu)
273{ 211{
274 cpumask_t callmap; 212 send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
275
276 preempt_disable();
277 cpumask_copy(&callmap, cpu_online_mask);
278 cpumask_clear_cpu(smp_processor_id(), &callmap);
279 if (!cpumask_empty(&callmap))
280 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
281
282 preempt_enable();
283
284 return 0;
285} 213}
286EXPORT_SYMBOL_GPL(smp_call_function);
287 214
288int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, 215void arch_send_call_function_ipi_mask(const struct cpumask *mask)
289 int wait)
290{ 216{
291 unsigned int cpu = cpuid; 217 send_ipi(mask, BFIN_IPI_CALL_FUNC);
292 cpumask_t callmap;
293
294 if (cpu_is_offline(cpu))
295 return 0;
296 cpumask_clear(&callmap);
297 cpumask_set_cpu(cpu, &callmap);
298
299 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
300
301 return 0;
302} 218}
303EXPORT_SYMBOL_GPL(smp_call_function_single);
304 219
305void smp_send_reschedule(int cpu) 220void smp_send_reschedule(int cpu)
306{ 221{
307 cpumask_t callmap; 222 send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
308 /* simply trigger an ipi */
309
310 cpumask_clear(&callmap);
311 cpumask_set_cpu(cpu, &callmap);
312
313 smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
314 223
315 return; 224 return;
316} 225}
317 226
318void smp_send_msg(const struct cpumask *mask, unsigned long type) 227void smp_send_msg(const struct cpumask *mask, unsigned long type)
319{ 228{
320 smp_send_message(*mask, type, NULL, NULL, 0); 229 send_ipi(mask, type);
321} 230}
322 231
323void smp_timer_broadcast(const struct cpumask *mask) 232void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
333 cpumask_copy(&callmap, cpu_online_mask); 242 cpumask_copy(&callmap, cpu_online_mask);
334 cpumask_clear_cpu(smp_processor_id(), &callmap); 243 cpumask_clear_cpu(smp_processor_id(), &callmap);
335 if (!cpumask_empty(&callmap)) 244 if (!cpumask_empty(&callmap))
336 smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); 245 send_ipi(&callmap, BFIN_IPI_CPU_STOP);
337 246
338 preempt_enable(); 247 preempt_enable();
339 248
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
436void __init smp_prepare_cpus(unsigned int max_cpus) 345void __init smp_prepare_cpus(unsigned int max_cpus)
437{ 346{
438 platform_prepare_cpus(max_cpus); 347 platform_prepare_cpus(max_cpus);
439 ipi_queue_init(); 348 bfin_ipi_init();
440 platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0); 349 platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
441 platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1); 350 platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
442} 351}
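
The rework replaces the locked per-cpu message FIFO with a per-cpu bitmask: senders set a bit for the message type, and the handler atomically grabs-and-clears the whole mask with xchg(), then walks the set bits. A standalone model of that fetch-and-clear pattern using C11 atomics (not the kernel's primitives):

#include <stdatomic.h>
#include <stdio.h>

enum { IPI_TIMER, IPI_RESCHEDULE, IPI_CALL_FUNC,
       IPI_CALL_FUNC_SINGLE, IPI_CPU_STOP };

static _Atomic unsigned long ipi_bits;	/* one word per CPU in the real code */

static void send_ipi(int msg)
{
	atomic_fetch_or(&ipi_bits, 1ul << msg);	/* mirrors set_bit() */
}

static void ipi_handler(void)
{
	unsigned long pending;

	/* xchg(..., 0): take every pending message in one atomic step */
	while ((pending = atomic_exchange(&ipi_bits, 0)) != 0) {
		for (int msg = 0; pending; msg++, pending >>= 1)
			if (pending & 1)
				printf("handle IPI %d\n", msg);
	}
}

int main(void)
{
	send_ipi(IPI_TIMER);
	send_ipi(IPI_CALL_FUNC);
	ipi_handler();
	return 0;
}
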
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index a1e9d69a9c90..584b93674ea4 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
169 if (*offset) 169 if (*offset)
170 return -EINVAL; 170 return -EINVAL;
171 retval = oprofilefs_ulong_from_user(&val, buf, count); 171 retval = oprofilefs_ulong_from_user(&val, buf, count);
172 if (retval) 172 if (retval <= 0)
173 return retval; 173 return retval;
174 if (val < oprofile_min_interval) 174 if (val < oprofile_min_interval)
175 oprofile_hw_interval = oprofile_min_interval; 175 oprofile_hw_interval = oprofile_min_interval;
@@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
212 return -EINVAL; 212 return -EINVAL;
213 213
214 retval = oprofilefs_ulong_from_user(&val, buf, count); 214 retval = oprofilefs_ulong_from_user(&val, buf, count);
215 if (retval) 215 if (retval <= 0)
216 return retval; 216 return retval;
217 if (val != 0) 217 if (val != 0)
218 return -EINVAL; 218 return -EINVAL;
@@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
243 return -EINVAL; 243 return -EINVAL;
244 244
245 retval = oprofilefs_ulong_from_user(&val, buf, count); 245 retval = oprofilefs_ulong_from_user(&val, buf, count);
246 if (retval) 246 if (retval <= 0)
247 return retval; 247 return retval;
248 248
249 if (val != 0 && val != 1) 249 if (val != 0 && val != 1)
@@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
278 return -EINVAL; 278 return -EINVAL;
279 279
280 retval = oprofilefs_ulong_from_user(&val, buf, count); 280 retval = oprofilefs_ulong_from_user(&val, buf, count);
281 if (retval) 281 if (retval <= 0)
282 return retval; 282 return retval;
283 283
284 if (val != 0 && val != 1) 284 if (val != 0 && val != 1)
@@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
317 return -EINVAL; 317 return -EINVAL;
318 318
319 retval = oprofilefs_ulong_from_user(&val, buf, count); 319 retval = oprofilefs_ulong_from_user(&val, buf, count);
320 if (retval) 320 if (retval <= 0)
321 return retval; 321 return retval;
322 322
323 if (val != 0 && val != 1) 323 if (val != 0 && val != 1)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 7f2739e03e79..0d3d63afa76a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2008,6 +2008,7 @@ __init int intel_pmu_init(void)
2008 break; 2008 break;
2009 2009
2010 case 28: /* Atom */ 2010 case 28: /* Atom */
2011 case 54: /* Cedariew */
2011 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 2012 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2012 sizeof(hw_cache_event_ids)); 2013 sizeof(hw_cache_event_ids));
2013 2014
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 520b4265fcd2..da02e9cc3754 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)
686 * to have an operational LBR which can freeze 686 * to have an operational LBR which can freeze
687 * on PMU interrupt 687 * on PMU interrupt
688 */ 688 */
689 if (boot_cpu_data.x86_mask < 10) { 689 if (boot_cpu_data.x86_model == 28
690 && boot_cpu_data.x86_mask < 10) {
690 pr_cont("LBR disabled due to erratum"); 691 pr_cont("LBR disabled due to erratum");
691 return; 692 return;
692 } 693 }
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 4873e62db6a1..9e5bcf1e2376 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
225 if (do_microcode_update(buf, len) == 0) 225 if (do_microcode_update(buf, len) == 0)
226 ret = (ssize_t)len; 226 ret = (ssize_t)len;
227 227
228 if (ret > 0)
229 perf_check_microcode();
230
228 mutex_unlock(&microcode_mutex); 231 mutex_unlock(&microcode_mutex);
229 put_online_cpus(); 232 put_online_cpus();
230 233
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index e498b18f010c..9fc9aa7ac703 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
318 if (val & 0x10) { 318 if (val & 0x10) {
319 u8 edge_irr = s->irr & ~s->elcr; 319 u8 edge_irr = s->irr & ~s->elcr;
320 int i; 320 int i;
321 bool found; 321 bool found = false;
322 struct kvm_vcpu *vcpu; 322 struct kvm_vcpu *vcpu;
323 323
324 s->init4 = val & 1; 324 s->init4 = val & 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03de1b79..b1eb202ee76a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3619,6 +3619,7 @@ static void seg_setup(int seg)
3619 3619
3620static int alloc_apic_access_page(struct kvm *kvm) 3620static int alloc_apic_access_page(struct kvm *kvm)
3621{ 3621{
3622 struct page *page;
3622 struct kvm_userspace_memory_region kvm_userspace_mem; 3623 struct kvm_userspace_memory_region kvm_userspace_mem;
3623 int r = 0; 3624 int r = 0;
3624 3625
@@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
3633 if (r) 3634 if (r)
3634 goto out; 3635 goto out;
3635 3636
3636 kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); 3637 page = gfn_to_page(kvm, 0xfee00);
3638 if (is_error_page(page)) {
3639 r = -EFAULT;
3640 goto out;
3641 }
3642
3643 kvm->arch.apic_access_page = page;
3637out: 3644out:
3638 mutex_unlock(&kvm->slots_lock); 3645 mutex_unlock(&kvm->slots_lock);
3639 return r; 3646 return r;
@@ -3641,6 +3648,7 @@ out:
3641 3648
3642static int alloc_identity_pagetable(struct kvm *kvm) 3649static int alloc_identity_pagetable(struct kvm *kvm)
3643{ 3650{
3651 struct page *page;
3644 struct kvm_userspace_memory_region kvm_userspace_mem; 3652 struct kvm_userspace_memory_region kvm_userspace_mem;
3645 int r = 0; 3653 int r = 0;
3646 3654
@@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
3656 if (r) 3664 if (r)
3657 goto out; 3665 goto out;
3658 3666
3659 kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, 3667 page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
3660 kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); 3668 if (is_error_page(page)) {
3669 r = -EFAULT;
3670 goto out;
3671 }
3672
3673 kvm->arch.ept_identity_pagetable = page;
3661out: 3674out:
3662 mutex_unlock(&kvm->slots_lock); 3675 mutex_unlock(&kvm->slots_lock);
3663 return r; 3676 return r;
@@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
6575 /* Exposing INVPCID only when PCID is exposed */ 6588 /* Exposing INVPCID only when PCID is exposed */
6576 best = kvm_find_cpuid_entry(vcpu, 0x7, 0); 6589 best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
6577 if (vmx_invpcid_supported() && 6590 if (vmx_invpcid_supported() &&
6578 best && (best->ecx & bit(X86_FEATURE_INVPCID)) && 6591 best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
6579 guest_cpuid_has_pcid(vcpu)) { 6592 guest_cpuid_has_pcid(vcpu)) {
6580 exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; 6593 exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
6581 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, 6594 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
6585 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, 6598 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
6586 exec_control); 6599 exec_control);
6587 if (best) 6600 if (best)
6588 best->ecx &= ~bit(X86_FEATURE_INVPCID); 6601 best->ebx &= ~bit(X86_FEATURE_INVPCID);
6589 } 6602 }
6590} 6603}
6591 6604
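
The fix moves the INVPCID feature test from ECX to EBX: for CPUID leaf 7 (subleaf 0), the structured extended feature flags are reported in EBX, where INVPCID is bit 10. A quick userspace check on x86, using the compiler-provided cpuid.h (GCC/Clang):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.(EAX=7,ECX=0): extended feature flags live in EBX */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("INVPCID: %s\n", (ebx & (1u << 10)) ? "yes" : "no");
	return 0;
}
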
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 148ed666e311..2966c847d489 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5113,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5113 !kvm_event_needs_reinjection(vcpu); 5113 !kvm_event_needs_reinjection(vcpu);
5114} 5114}
5115 5115
5116static void vapic_enter(struct kvm_vcpu *vcpu) 5116static int vapic_enter(struct kvm_vcpu *vcpu)
5117{ 5117{
5118 struct kvm_lapic *apic = vcpu->arch.apic; 5118 struct kvm_lapic *apic = vcpu->arch.apic;
5119 struct page *page; 5119 struct page *page;
5120 5120
5121 if (!apic || !apic->vapic_addr) 5121 if (!apic || !apic->vapic_addr)
5122 return; 5122 return 0;
5123 5123
5124 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); 5124 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
5125 if (is_error_page(page))
5126 return -EFAULT;
5125 5127
5126 vcpu->arch.apic->vapic_page = page; 5128 vcpu->arch.apic->vapic_page = page;
5129 return 0;
5127} 5130}
5128 5131
5129static void vapic_exit(struct kvm_vcpu *vcpu) 5132static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -5430,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
5430 } 5433 }
5431 5434
5432 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 5435 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5433 vapic_enter(vcpu); 5436 r = vapic_enter(vcpu);
5437 if (r) {
5438 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5439 return r;
5440 }
5434 5441
5435 r = 1; 5442 r = 1;
5436 while (r > 0) { 5443 while (r > 0) {