author    Andrea Bastoni <bastoni@cs.unc.edu>  2011-08-27 09:43:54 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2011-08-27 10:06:11 -0400
commit    7b1bb388bc879ffcc6c69b567816d5c354afe42b
tree      5a217fdfb0b5e5a327bdcd624506337c1ae1fe32  /arch/arm/include
parent    7d754596756240fa918b94cd0c3011c77a638987
parent    02f8c6aee8df3cdc935e9bdd4f2d020306035dbe
Merge 'Linux v3.0' into Litmus
Some notes:

* Litmus^RT scheduling class is the topmost scheduling class (above
  stop_sched_class).
* scheduler_ipi() function (e.g., in smp_reschedule_interrupt()) may
  increase IPI latencies.
* Added path into schedule() to quickly re-evaluate scheduling decision
  without becoming preemptive again. This used to be a standard path
  before the removal of BKL.

Conflicts:
	Makefile
	arch/arm/kernel/calls.S
	arch/arm/kernel/smp.c
	arch/x86/include/asm/unistd_32.h
	arch/x86/kernel/smp.c
	arch/x86/kernel/syscall_table_32.S
	include/linux/hrtimer.h
	kernel/printk.c
	kernel/sched.c
	kernel/sched_fair.c
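For context on the first note: in v3.0-era kernels the scheduling classes
form a singly linked list that pick_next_task() walks highest-first, each
class naming the next-lower one in its .next field. A rough sketch of the
ordering described above -- illustrative only; the actual litmus class is
defined under litmus/ and is not part of this arch/arm/include diff:

    /* Hypothetical sketch, not code from this merge. */
    static const struct sched_class litmus_sched_class = {
            .next = &stop_sched_class,  /* topmost, above stop_sched_class */
            /* .enqueue_task, .dequeue_task, .pick_next_task, ... */
    };

giving the effective order litmus -> stop -> rt -> fair -> idle.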
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/a.out-core.h               |   6
-rw-r--r--  arch/arm/include/asm/assembler.h                |  64
-rw-r--r--  arch/arm/include/asm/bitops.h                   | 126
-rw-r--r--  arch/arm/include/asm/cache.h                    |   2
-rw-r--r--  arch/arm/include/asm/cacheflush.h               | 197
-rw-r--r--  arch/arm/include/asm/cachetype.h                |   8
-rw-r--r--  arch/arm/include/asm/clkdev.h                   |  22
-rw-r--r--  arch/arm/include/asm/cpu-multi32.h              |  69
-rw-r--r--  arch/arm/include/asm/cpu-single.h               |  44
-rw-r--r--  arch/arm/include/asm/cputype.h                  |   4
-rw-r--r--  arch/arm/include/asm/dma-mapping.h              |  93
-rw-r--r--  arch/arm/include/asm/dma.h                      |   4
-rw-r--r--  arch/arm/include/asm/domain.h                   |  31
-rw-r--r--  arch/arm/include/asm/elf.h                      |   7
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S        |  46
-rw-r--r--  arch/arm/include/asm/fiq.h                      |  23
-rw-r--r--  arch/arm/include/asm/fncpy.h                    |  94
-rw-r--r--  arch/arm/include/asm/fpstate.h                  |   2
-rw-r--r--  arch/arm/include/asm/ftrace.h                   |  20
-rw-r--r--  arch/arm/include/asm/futex.h                    | 143
-rw-r--r--  arch/arm/include/asm/glue-cache.h               | 146
-rw-r--r--  arch/arm/include/asm/glue-df.h                  | 110
-rw-r--r--  arch/arm/include/asm/glue-pf.h                  |  57
-rw-r--r--  arch/arm/include/asm/glue-proc.h                | 264
-rw-r--r--  arch/arm/include/asm/glue.h                     | 142
-rw-r--r--  arch/arm/include/asm/hardirq.h                  |  18
-rw-r--r--  arch/arm/include/asm/hardware/cache-l2x0.h      |  24
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h       |  34
-rw-r--r--  arch/arm/include/asm/hardware/entry-macro-gic.S |  75
-rw-r--r--  arch/arm/include/asm/hardware/gic.h             |   8
-rw-r--r--  arch/arm/include/asm/hardware/icst.h            |   2
-rw-r--r--  arch/arm/include/asm/hardware/it8152.h          |   3
-rw-r--r--  arch/arm/include/asm/hardware/pl080.h           |   2
-rw-r--r--  arch/arm/include/asm/hardware/sp810.h           |   9
-rw-r--r--  arch/arm/include/asm/hardware/timer-sp.h        |   2
-rw-r--r--  arch/arm/include/asm/highmem.h                  |  38
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h            | 133
-rw-r--r--  arch/arm/include/asm/hw_irq.h                   |  10
-rw-r--r--  arch/arm/include/asm/i8253.h                    |  15
-rw-r--r--  arch/arm/include/asm/io.h                       |  47
-rw-r--r--  arch/arm/include/asm/ioctls.h                   |  83
-rw-r--r--  arch/arm/include/asm/irqflags.h                 | 145
-rw-r--r--  arch/arm/include/asm/kexec.h                    |  21
-rw-r--r--  arch/arm/include/asm/kgdb.h                     |   5
-rw-r--r--  arch/arm/include/asm/kprobes.h                  |   3
-rw-r--r--  arch/arm/include/asm/localtimer.h               |  12
-rw-r--r--  arch/arm/include/asm/mach/arch.h                |  29
-rw-r--r--  arch/arm/include/asm/mach/irq.h                 |  39
-rw-r--r--  arch/arm/include/asm/mach/time.h                |   2
-rw-r--r--  arch/arm/include/asm/mach/udc_pxa2xx.h          |   2
-rw-r--r--  arch/arm/include/asm/memblock.h                 |   7
-rw-r--r--  arch/arm/include/asm/memory.h                   |  85
-rw-r--r--  arch/arm/include/asm/mmu.h                      |   4
-rw-r--r--  arch/arm/include/asm/mmu_context.h              |  29
-rw-r--r--  arch/arm/include/asm/module.h                   |  51
-rw-r--r--  arch/arm/include/asm/outercache.h               |  39
-rw-r--r--  arch/arm/include/asm/page.h                     |   8
-rw-r--r--  arch/arm/include/asm/perf_event.h               |  12
-rw-r--r--  arch/arm/include/asm/pgalloc.h                  |  52
-rw-r--r--  arch/arm/include/asm/pgtable.h                  | 309
-rw-r--r--  arch/arm/include/asm/pmu.h                      |  14
-rw-r--r--  arch/arm/include/asm/proc-fns.h                 | 306
-rw-r--r--  arch/arm/include/asm/processor.h                |  18
-rw-r--r--  arch/arm/include/asm/prom.h                     |  37
-rw-r--r--  arch/arm/include/asm/ptrace.h                   |  10
-rw-r--r--  arch/arm/include/asm/sched_clock.h              | 120
-rw-r--r--  arch/arm/include/asm/seccomp.h                  |  11
-rw-r--r--  arch/arm/include/asm/setup.h                    |  12
-rw-r--r--  arch/arm/include/asm/sizes.h                    |  42
-rw-r--r--  arch/arm/include/asm/smp.h                      |  28
-rw-r--r--  arch/arm/include/asm/smp_plat.h                 |  25
-rw-r--r--  arch/arm/include/asm/smp_scu.h                  |   7
-rw-r--r--  arch/arm/include/asm/smp_twd.h                  |   1
-rw-r--r--  arch/arm/include/asm/spinlock.h                 |  55
-rw-r--r--  arch/arm/include/asm/system.h                   |  40
-rw-r--r--  arch/arm/include/asm/thread_info.h              |   2
-rw-r--r--  arch/arm/include/asm/thread_notify.h            |   1
-rw-r--r--  arch/arm/include/asm/tlb.h                      | 140
-rw-r--r--  arch/arm/include/asm/tlbflush.h                 |  43
-rw-r--r--  arch/arm/include/asm/tls.h                      |  11
-rw-r--r--  arch/arm/include/asm/traps.h                    |  26
-rw-r--r--  arch/arm/include/asm/types.h                    |   9
-rw-r--r--  arch/arm/include/asm/uaccess.h                  |  16
-rw-r--r--  arch/arm/include/asm/ucontext.h                 |   2
-rw-r--r--  arch/arm/include/asm/unistd.h                   |   6
-rw-r--r--  arch/arm/include/asm/user.h                     |   2
86 files changed, 2591 insertions(+), 1474 deletions(-)
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
index 93d04acaa31f..92f10cb5c70c 100644
--- a/arch/arm/include/asm/a.out-core.h
+++ b/arch/arm/include/asm/a.out-core.h
@@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	dump->u_ssize = 0;
 
-	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
-	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
-	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
-	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
-	dump->u_debugreg[4] = tsk->thread.debug.nsaved;
+	memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
 
 	if (dump->start_stack < 0x04000000)
 		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6e8f05c8a1c8..65c3f2474f5e 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -13,11 +13,15 @@
  * Do not include any C declarations in this file - it is included by
  * assembler source.
  */
+#ifndef __ASM_ASSEMBLER_H__
+#define __ASM_ASSEMBLER_H__
+
 #ifndef __ASSEMBLY__
 #error "Only include this from assembly code"
 #endif
 
 #include <asm/ptrace.h>
+#include <asm/domain.h>
 
 /*
  * Endian independent macros for shifting bytes within registers.
@@ -154,16 +158,55 @@
 	.long	9999b,9001f;			\
 	.popsection
 
+#ifdef CONFIG_SMP
+#define ALT_SMP(instr...)					\
+9998:	instr
+/*
+ * Note: if you get assembler errors from ALT_UP() when building with
+ * CONFIG_THUMB2_KERNEL, you almost certainly need to use
+ * ALT_SMP( W(instr) ... )
+ */
+#define ALT_UP(instr...)					\
+	.pushsection ".alt.smp.init", "a"			;\
+	.long	9998b						;\
+9997:	instr							;\
+	.if . - 9997b != 4					;\
+		.error "ALT_UP() content must assemble to exactly 4 bytes";\
+	.endif							;\
+	.popsection
+#define ALT_UP_B(label)						\
+	.equ	up_b_offset, label - 9998b			;\
+	.pushsection ".alt.smp.init", "a"			;\
+	.long	9998b						;\
+	W(b)	. + up_b_offset					;\
+	.popsection
+#else
+#define ALT_SMP(instr...)
+#define ALT_UP(instr...) instr
+#define ALT_UP_B(label) b label
+#endif
+
 /*
  * SMP data memory barrier
  */
-	.macro	smp_dmb
+	.macro	smp_dmb mode
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
-	dmb
+	.ifeqs "\mode","arm"
+	ALT_SMP(dmb)
+	.else
+	ALT_SMP(W(dmb))
+	.endif
 #elif __LINUX_ARM_ARCH__ == 6
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
+#else
+#error Incompatible SMP platform
 #endif
+	.ifeqs "\mode","arm"
+	ALT_UP(nop)
+	.else
+	ALT_UP(W(nop))
+	.endif
 #endif
 	.endm
 
@@ -183,12 +226,12 @@
  */
 #ifdef CONFIG_THUMB2_KERNEL
 
-	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort
+	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
 9999:
 	.if	\inc == 1
-	\instr\cond\()bt \reg, [\ptr, #\off]
+	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
 	.elseif	\inc == 4
-	\instr\cond\()t \reg, [\ptr, #\off]
+	\instr\cond\()\t\().w \reg, [\ptr, #\off]
 	.else
 	.error	"Unsupported inc macro argument"
 	.endif
@@ -215,7 +258,7 @@
 	@ Slightly optimised to avoid incrementing the pointer twice
 	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
 	.if	\rept == 2
-	usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
 	.endif
 
 	add\cond \ptr, #\rept * \inc
@@ -223,13 +266,13 @@
 
 #else	/* !CONFIG_THUMB2_KERNEL */
 
-	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
+	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
 	.rept	\rept
 9999:
 	.if	\inc == 1
-	\instr\cond\()bt \reg, [\ptr], #\inc
+	\instr\cond\()b\()\t \reg, [\ptr], #\inc
 	.elseif	\inc == 4
-	\instr\cond\()t \reg, [\ptr], #\inc
+	\instr\cond\()\t \reg, [\ptr], #\inc
 	.else
 	.error	"Unsupported inc macro argument"
 	.endif
@@ -250,3 +293,4 @@
 	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
 	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
 	.endm
+#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 338ff19ae447..b4892a06442c 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -149,14 +149,18 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
  */
 
 /*
+ * Native endian assembly bitops.  nr = 0 -> word 0 bit 0.
+ */
+extern void _set_bit(int nr, volatile unsigned long * p);
+extern void _clear_bit(int nr, volatile unsigned long * p);
+extern void _change_bit(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+
+/*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  */
-extern void _set_bit_le(int nr, volatile unsigned long * p);
-extern void _clear_bit_le(int nr, volatile unsigned long * p);
-extern void _change_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_le(const void * p, unsigned size);
 extern int _find_next_zero_bit_le(const void * p, int size, int offset);
 extern int _find_first_bit_le(const unsigned long *p, unsigned size);
@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
 /*
  * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
  */
-extern void _set_bit_be(int nr, volatile unsigned long * p);
-extern void _clear_bit_be(int nr, volatile unsigned long * p);
-extern void _change_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_be(const void * p, unsigned size);
 extern int _find_next_zero_bit_be(const void * p, int size, int offset);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
@@ -180,58 +178,40 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 /*
  * The __* form of bitops are non-atomic and may be reordered.
  */
-#define ATOMIC_BITOP_LE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_le(nr,p))
-
-#define ATOMIC_BITOP_BE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_be(nr,p))
+#define ATOMIC_BITOP(name,nr,p)			\
+	(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
 #else
-#define ATOMIC_BITOP_LE(name,nr,p)	_##name##_le(nr,p)
-#define ATOMIC_BITOP_BE(name,nr,p)	_##name##_be(nr,p)
+#define ATOMIC_BITOP(name,nr,p)		_##name(nr,p)
 #endif
 
-#define NONATOMIC_BITOP(name,nr,p)		\
-	(____nonatomic_##name(nr, p))
+/*
+ * Native endian atomic definitions.
+ */
+#define set_bit(nr,p)			ATOMIC_BITOP(set_bit,nr,p)
+#define clear_bit(nr,p)			ATOMIC_BITOP(clear_bit,nr,p)
+#define change_bit(nr,p)		ATOMIC_BITOP(change_bit,nr,p)
+#define test_and_set_bit(nr,p)		ATOMIC_BITOP(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p)	ATOMIC_BITOP(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p)	ATOMIC_BITOP(test_and_change_bit,nr,p)
 
 #ifndef __ARMEB__
 /*
  * These are the little endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_LE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_LE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_LE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
 #define find_next_bit(p,sz,off)	_find_next_bit_le(p,sz,off)
 
-#define WORD_BITOFF_TO_LE(x)		((x))
-
 #else
-
 /*
  * These are the big endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_BE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_BE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_BE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
 #define find_next_bit(p,sz,off)	_find_next_bit_be(p,sz,off)
 
-#define WORD_BITOFF_TO_LE(x)		((x) ^ 0x18)
-
 #endif
 
 #if __LINUX_ARM_ARCH__ < 5
@@ -285,7 +265,7 @@ static inline int fls(int x)
 	if (__builtin_constant_p(x))
 	       return constant_fls(x);
 
-	asm("clz\t%0, %1" : "=r" (ret) : "r" (x) : "cc");
+	asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
 	ret = 32 - ret;
 	return ret;
 }
@@ -303,41 +283,37 @@ static inline int fls(int x)
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
 
-/*
- * Ext2 is defined to use little-endian byte ordering.
- * These do not need to be atomic.
- */
-#define ext2_set_bit(nr,p)			\
-		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
-#define ext2_set_bit_atomic(lock,nr,p)		\
-		test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
-#define ext2_clear_bit(nr,p)			\
-		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
-#define ext2_clear_bit_atomic(lock,nr,p)	\
-		test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
-#define ext2_test_bit(nr,p)			\
-		test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
-#define ext2_find_first_zero_bit(p,sz)		\
-		_find_first_zero_bit_le(p,sz)
-#define ext2_find_next_zero_bit(p,sz,off)	\
-		_find_next_zero_bit_le(p,sz,off)
-#define ext2_find_next_bit(p, sz, off)		\
-		_find_next_bit_le(p, sz, off)
+#ifdef __ARMEB__
+
+static inline int find_first_zero_bit_le(const void *p, unsigned size)
+{
+	return _find_first_zero_bit_le(p, size);
+}
+#define find_first_zero_bit_le find_first_zero_bit_le
+
+static inline int find_next_zero_bit_le(const void *p, int size, int offset)
+{
+	return _find_next_zero_bit_le(p, size, offset);
+}
+#define find_next_zero_bit_le find_next_zero_bit_le
+
+static inline int find_next_bit_le(const void *p, int size, int offset)
+{
+	return _find_next_bit_le(p, size, offset);
+}
+#define find_next_bit_le find_next_bit_le
+
+#endif
+
+#include <asm-generic/bitops/le.h>
 
 /*
- * Minix is defined to use little-endian byte ordering.
- * These do not need to be atomic.
+ * Ext2 is defined to use little-endian byte ordering.
  */
-#define minix_set_bit(nr,p)		\
-		__set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
-#define minix_test_bit(nr,p)		\
-		test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
-#define minix_test_and_set_bit(nr,p)	\
-		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
-#define minix_test_and_clear_bit(nr,p)	\
-		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p))
-#define minix_find_first_zero_bit(p,sz)	\
-		_find_first_zero_bit_le(p,sz)
+#define ext2_set_bit_atomic(lock, nr, p)	\
+		test_and_set_bit_le(nr, p)
+#define ext2_clear_bit_atomic(lock, nr, p)	\
+		test_and_clear_bit_le(nr, p)
 
 #endif /* __KERNEL__ */
 
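The ATOMIC_BITOP() consolidation above keeps the usual contract: the plain
forms are atomic, while the __* forms are non-atomic and may be reordered.
A minimal illustration of the difference (generic kernel bitops usage, not
code from this patch):

    static unsigned long shared_flags;   /* written from several contexts */
    static unsigned long private_flags;  /* owned by a single thread */

    static void bitops_usage_sketch(void)
    {
            set_bit(0, &shared_flags);            /* atomic, via ATOMIC_BITOP() */
            if (test_and_clear_bit(0, &shared_flags))
                    ;                             /* test+clear done atomically */
            __set_bit(1, &private_flags);         /* non-atomic __* form */
    }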
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 9d6122096fbe..75fe66bc02b4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -23,4 +23,6 @@
 #define ARCH_SLAB_MINALIGN 8
 #endif
 
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4656a24058d2..d5d8d5c72682 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -12,7 +12,7 @@
 
 #include <linux/mm.h>
 
-#include <asm/glue.h>
+#include <asm/glue-cache.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
 #include <asm/outercache.h>
@@ -20,127 +20,10 @@
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
 /*
- * Cache Model
- * ===========
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
  */
-#undef _CACHE
-#undef MULTI_CACHE
-
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v3
-# endif
-#endif
-
-#if defined(CONFIG_CPU_CACHE_V4)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v4
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
-    defined(CONFIG_CPU_ARM1026)
-# define MULTI_CACHE 1
-#endif
-
-#if defined(CONFIG_CPU_FA526)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE fa
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM926T)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE arm926
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM940T)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE arm940
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM946E)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE arm946
-# endif
-#endif
-
-#if defined(CONFIG_CPU_CACHE_V4WB)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v4wb
-# endif
-#endif
-
-#if defined(CONFIG_CPU_XSCALE)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE xscale
-# endif
-#endif
-
-#if defined(CONFIG_CPU_XSC3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE xsc3
-# endif
-#endif
-
-#if defined(CONFIG_CPU_MOHAWK)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE mohawk
-# endif
-#endif
-
-#if defined(CONFIG_CPU_FEROCEON)
-# define MULTI_CACHE 1
-#endif
-
-#if defined(CONFIG_CPU_V6)
-//# ifdef _CACHE
-# define MULTI_CACHE 1
-//# else
-//#  define _CACHE v6
-//# endif
-#endif
-
-#if defined(CONFIG_CPU_V7)
-//# ifdef _CACHE
-# define MULTI_CACHE 1
-//# else
-//#  define _CACHE v7
-//# endif
-#endif
-
-#if !defined(_CACHE) && !defined(MULTI_CACHE)
-#error Unknown cache maintainence model
-#endif
-
-/*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
- */
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
 
 /*
  * MM Cache Management
@@ -156,6 +39,12 @@
  * Please note that the implementation of these, and the required
  * effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
+ *	flush_icache_all()
+ *
+ *		Unconditionally clean and invalidate the entire icache.
+ *		Currently only needed for cache-v6.S and cache-v7.S, see
+ *		__flush_icache_all for the generic implementation.
+ *
  *	flush_kern_all()
  *
  *		Unconditionally clean and invalidate the entire cache.
@@ -206,6 +95,7 @@
  */
 
 struct cpu_cache_fns {
+	void (*flush_icache_all)(void);
 	void (*flush_kern_all)(void);
 	void (*flush_user_all)(void);
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -227,6 +117,7 @@ struct cpu_cache_fns {
 
 extern struct cpu_cache_fns cpu_cache;
 
+#define __cpuc_flush_icache_all	cpu_cache.flush_icache_all
 #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
@@ -241,18 +132,12 @@ extern struct cpu_cache_fns cpu_cache;
  * visible to the CPU.
  */
 #define dmac_map_area			cpu_cache.dma_map_area
 #define dmac_unmap_area		cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
 
-#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
-#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
-#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
-#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
-#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
-
+extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
@@ -266,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area)
-#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
-
 extern void dmac_map_area(const void *, size_t, int);
 extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
@@ -291,6 +172,38 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 /*
  * Convert calls to our calling convention.
  */
+
+/* Invalidate I-cache */
+#define __flush_icache_all_generic()				\
+	asm("mcr	p15, 0, %0, c7, c5, 0"			\
+	    : : "r" (0));
+
+/* Invalidate I-cache inner shareable */
+#define __flush_icache_all_v7_smp()				\
+	asm("mcr	p15, 0, %0, c7, c1, 0"			\
+	    : : "r" (0));
+
+/*
+ * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
+ * will fall through to use __flush_icache_all_generic.
+ */
+#if (defined(CONFIG_CPU_V7) &&					\
+     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||	\
+	defined(CONFIG_SMP_ON_UP)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+#define __flush_icache_preferred	__flush_icache_all_v7_smp
+#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#else
+#define __flush_icache_preferred	__flush_icache_all_generic
+#endif
+
+static inline void __flush_icache_all(void)
+{
+	__flush_icache_preferred();
+}
+
 #define flush_cache_all()		__cpuc_flush_kern_all()
 
 static inline void vivt_flush_cache_mm(struct mm_struct *mm)
@@ -366,21 +279,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __flush_icache_all(void)
-{
-#ifdef CONFIG_ARM_ERRATA_411920
-	extern void v6_icache_inval_all(void);
-	v6_icache_inval_all();
-#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
-	asm("mcr	p15, 0, %0, c7, c1, 0	@ invalidate I-cache inner shareable\n"
-	    :
-	    : "r" (0));
-#else
-	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
-	    :
-	    : "r" (0));
-#endif
-}
 static inline void flush_kernel_vmap_range(void *addr, int size)
 {
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
@@ -405,9 +303,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
 {
-	/* highmem pages are always flushed upon kunmap already */
-	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index d3a4c2cb9f2f..c023db09fcc1 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -6,6 +6,7 @@
 #define CACHEID_VIPT_ALIASING		(1 << 2)
 #define CACHEID_VIPT			(CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
 #define CACHEID_ASID_TAGGED		(1 << 3)
+#define CACHEID_VIPT_I_ALIASING		(1 << 4)
 
 extern unsigned int cacheid;
 
@@ -14,15 +15,18 @@ extern unsigned int cacheid;
 #define cache_is_vipt_nonaliasing()	cacheid_is(CACHEID_VIPT_NONALIASING)
 #define cache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_ALIASING)
 #define icache_is_vivt_asid_tagged()	cacheid_is(CACHEID_ASID_TAGGED)
+#define icache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_I_ALIASING)
 
 /*
  * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
  * Mask out support which will never be present on newer CPUs.
  * - v6+ is never VIVT
- * - v7+ VIPT never aliases
+ * - v7+ VIPT never aliases on D-side
  */
 #if __LINUX_ARM_ARCH__ >= 7
-#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
+#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING |\
+				 CACHEID_ASID_TAGGED |\
+				 CACHEID_VIPT_I_ALIASING)
 #elif __LINUX_ARM_ARCH__ >= 6
 #define	__CACHEID_ARCH_MIN	(~CACHEID_VIVT)
 #else
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index b56c1389b6fa..765d33222369 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -12,23 +12,13 @@
 #ifndef __ASM_CLKDEV_H
 #define __ASM_CLKDEV_H
 
-struct clk;
-struct device;
+#include <linux/slab.h>
 
-struct clk_lookup {
-	struct list_head	node;
-	const char		*dev_id;
-	const char		*con_id;
-	struct clk		*clk;
-};
+#include <mach/clkdev.h>
 
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
-	const char *dev_fmt, ...);
-
-void clkdev_add(struct clk_lookup *cl);
-void clkdev_drop(struct clk_lookup *cl);
-
-void clkdev_add_table(struct clk_lookup *, size_t);
-int clk_add_alias(const char *, const char *, char *, struct device *);
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+	return kzalloc(size, GFP_KERNEL);
+}
 
 #endif
diff --git a/arch/arm/include/asm/cpu-multi32.h b/arch/arm/include/asm/cpu-multi32.h
deleted file mode 100644
index e2b5b0b2116a..000000000000
--- a/arch/arm/include/asm/cpu-multi32.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- *  arch/arm/include/asm/cpu-multi32.h
- *
- *  Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <asm/page.h>
-
-struct mm_struct;
-
-/*
- * Don't change this structure - ASM code
- * relies on it.
- */
-extern struct processor {
-	/* MISC
-	 * get data abort address/flags
-	 */
-	void (*_data_abort)(unsigned long pc);
-	/*
-	 * Retrieve prefetch fault address
-	 */
-	unsigned long (*_prefetch_abort)(unsigned long lr);
-	/*
-	 * Set up any processor specifics
-	 */
-	void (*_proc_init)(void);
-	/*
-	 * Disable any processor specifics
-	 */
-	void (*_proc_fin)(void);
-	/*
-	 * Special stuff for a reset
-	 */
-	void (*reset)(unsigned long addr) __attribute__((noreturn));
-	/*
-	 * Idle the processor
-	 */
-	int (*_do_idle)(void);
-	/*
-	 * Processor architecture specific
-	 */
-	/*
-	 * clean a virtual address range from the
-	 * D-cache without flushing the cache.
-	 */
-	void (*dcache_clean_area)(void *addr, int size);
-
-	/*
-	 * Set the page table
-	 */
-	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
-	/*
-	 * Set a possibly extended PTE.  Non-extended PTEs should
-	 * ignore 'ext'.
-	 */
-	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
-} processor;
-
-#define cpu_proc_init()			processor._proc_init()
-#define cpu_proc_fin()			processor._proc_fin()
-#define cpu_reset(addr)			processor.reset(addr)
-#define cpu_do_idle()			processor._do_idle()
-#define cpu_dcache_clean_area(addr,sz)	processor.dcache_clean_area(addr,sz)
-#define cpu_set_pte_ext(ptep,pte,ext)	processor.set_pte_ext(ptep,pte,ext)
-#define cpu_do_switch_mm(pgd,mm)	processor.switch_mm(pgd,mm)
diff --git a/arch/arm/include/asm/cpu-single.h b/arch/arm/include/asm/cpu-single.h
deleted file mode 100644
index f073a6d2a406..000000000000
--- a/arch/arm/include/asm/cpu-single.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *  arch/arm/include/asm/cpu-single.h
- *
- *  Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/*
- * Single CPU
- */
-#ifdef __STDC__
-#define __catify_fn(name,x)	name##x
-#else
-#define __catify_fn(name,x)	name/**/x
-#endif
-#define __cpu_fn(name,x)	__catify_fn(name,x)
-
-/*
- * If we are supporting multiple CPUs, then we must use a table of
- * function pointers for this lot. Otherwise, we can optimise the
- * table away.
- */
-#define cpu_proc_init			__cpu_fn(CPU_NAME,_proc_init)
-#define cpu_proc_fin			__cpu_fn(CPU_NAME,_proc_fin)
-#define cpu_reset			__cpu_fn(CPU_NAME,_reset)
-#define cpu_do_idle			__cpu_fn(CPU_NAME,_do_idle)
-#define cpu_dcache_clean_area		__cpu_fn(CPU_NAME,_dcache_clean_area)
-#define cpu_do_switch_mm		__cpu_fn(CPU_NAME,_switch_mm)
-#define cpu_set_pte_ext			__cpu_fn(CPU_NAME,_set_pte_ext)
-
-#include <asm/page.h>
-
-struct mm_struct;
-
-/* declare all the functions as extern */
-extern void cpu_proc_init(void);
-extern void cpu_proc_fin(void);
-extern int cpu_do_idle(void);
-extern void cpu_dcache_clean_area(void *, int);
-extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 20ae96cc0020..cd4458f64171 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -2,6 +2,7 @@
 #define __ASM_ARM_CPUTYPE_H
 
 #include <linux/stringify.h>
+#include <linux/kernel.h>
 
 #define CPUID_ID	0
 #define CPUID_CACHETYPE	1
@@ -23,6 +24,8 @@
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
 
+extern unsigned int processor_id;
+
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)							\
 	({								\
@@ -43,7 +46,6 @@
 		__val;							\
 	})
 #else
-extern unsigned int processor_id;
 #define read_cpuid(reg) (processor_id)
 #define read_cpuid_ext(reg) 0
 #endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c568da7dcae4..4fff837363ed 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,24 +5,29 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#ifdef __arch_page_to_dma
+#error Please update to __arch_pfn_to_dma
+#endif
+
 /*
- * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
- * used internally by the DMA-mapping API to provide DMA addresses. They
- * must not be used by drivers.
+ * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
+ * functions used internally by the DMA-mapping API to provide DMA
+ * addresses. They must not be used by drivers.
  */
-#ifndef __arch_page_to_dma
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+#ifndef __arch_pfn_to_dma
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
-	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+	return (dma_addr_t)__pfn_to_bus(pfn);
 }
 
-static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-	return pfn_to_page(__bus_to_pfn(addr));
+	return __bus_to_pfn(addr);
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
@@ -35,14 +40,14 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
 #else
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
-	return __arch_page_to_dma(dev, page);
+	return __arch_pfn_to_dma(dev, pfn);
 }
 
-static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-	return __arch_dma_to_page(dev, addr);
+	return __arch_dma_to_pfn(dev, addr);
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
@@ -293,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
  */
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
 		enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
 
 /*
@@ -323,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -340,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	addr = __dma_map_single(dev, cpu_addr, size, dir);
+	debug_dma_map_page(dev, virt_to_page(cpu_addr),
+			(unsigned long)cpu_addr & ~PAGE_MASK, size,
+			dir, addr, true);
 
-	return virt_to_dma(dev, cpu_addr);
+	return addr;
 }
 
 /**
@@ -364,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
-	return page_to_dma(dev, page) + offset;
+	return addr;
 }
 
 /**
@@ -388,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, true);
+	__dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -408,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
-		size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, false);
+	__dma_unmap_page(dev, handle, size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
@@ -437,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
 
@@ -449,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
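A sketch of the driver-side calls these wrappers now instrument (the device
and buffer names are invented for illustration); with CONFIG_DMA_API_DEBUG
enabled, the debug_dma_* hooks added above record each mapping and flag
unbalanced or mismatched usage:

    static int send_buffer_sketch(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... hand 'handle' to the hardware, wait for completion ... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }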
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index ca51143f97f1..42005542932b 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -6,8 +6,10 @@
 /*
  * This is the maximum virtual address which can be DMA'd from.
  */
-#ifndef MAX_DMA_ADDRESS
+#ifndef ARM_DMA_ZONE_SIZE
 #define MAX_DMA_ADDRESS	0xffffffff
+#else
+#define MAX_DMA_ADDRESS	(PAGE_OFFSET + ARM_DMA_ZONE_SIZE)
 #endif
 
 #ifdef CONFIG_ISA_DMA_API
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index cc7ef4080711..af18ceaacf5d 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -45,13 +45,17 @@
  */
 #define DOMAIN_NOACCESS	0
 #define DOMAIN_CLIENT	1
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define DOMAIN_MANAGER	3
+#else
+#define DOMAIN_MANAGER	1
+#endif
 
 #define domain_val(dom,type)	((type) << (2*(dom)))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define set_domain(x)					\
 	do {						\
 	__asm__ __volatile__(				\
@@ -74,5 +78,28 @@
 #define modify_domain(dom,type)	do { } while (0)
 #endif
 
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions (inline assembly)
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define T(instr)	#instr "t"
+#else
+#define T(instr)	#instr
 #endif
-#endif /* !__ASSEMBLY__ */
+
+#else /* __ASSEMBLY__ */
+
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define T(instr)	instr ## t
+#else
+#define T(instr)	instr
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* !__ASM_PROC_DOMAIN_H */
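The C-side T() macro above is meant to be string-pasted into uaccess-style
inline assembly: with CONFIG_CPU_USE_DOMAINS, T(ldr) expands to "ldr" "t"
(the unprivileged ldrt form); without domains it degrades to a plain "ldr".
A hypothetical single-word read showing the pattern (the futex.h hunk later
in this diff uses the same idea; the macro name here is invented):

    #define __read_user_word_sketch(val, ptr, err)          \
            __asm__ __volatile__(                           \
            "1:     " T(ldr) "      %1, [%2]\n"             \
            "2:\n"                                          \
            "       .pushsection __ex_table,\"a\"\n"        \
            "       .align  3\n"                            \
            "       .long   1b, 3f\n"                       \
            "       .popsection\n"                          \
            "       .pushsection .fixup,\"ax\"\n"           \
            "3:     mov     %0, %3\n"                       \
            "       b       2b\n"                           \
            "       .popsection"                            \
            : "+r" (err), "=&r" (val)                       \
            : "r" (ptr), "i" (-EFAULT))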
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 5747a8baa413..0e9ce8d9686e 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -99,6 +99,8 @@ struct elf32_hdr;
 extern int elf_check_arch(const struct elf32_hdr *);
 #define elf_check_arch elf_check_arch
 
+#define vmcore_elf64_check_arch(x) (0)
+
 extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
 #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
 
@@ -106,6 +108,7 @@ struct task_struct;
 int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 #define ELF_CORE_COPY_TASK_REGS dump_task_regs
 
+#define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
@@ -127,4 +130,8 @@ struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
+extern int vectors_user_mapping(void);
+#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+
 #endif
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
new file mode 100644
index 000000000000..2da8547de6d6
--- /dev/null
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -0,0 +1,46 @@
+#include <asm/assembler.h>
+
+/*
+ * Interrupt handling.  Preserves r7, r8, r9
+ */
+	.macro	arch_irq_handler_default
+	get_irqnr_preamble r5, lr
+1:	get_irqnr_and_base r0, r6, r5, lr
+	movne	r1, sp
+	@
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	@
+	adrne	lr, BSYM(1b)
+	bne	asm_do_IRQ
+
+#ifdef CONFIG_SMP
+	/*
+	 * XXX
+	 *
+	 * this macro assumes that irqstat (r6) and base (r5) are
+	 * preserved from get_irqnr_and_base above
+	 */
+	ALT_SMP(test_for_ipi r0, r6, r5, lr)
+	ALT_UP_B(9997f)
+	movne	r1, sp
+	adrne	lr, BSYM(1b)
+	bne	do_IPI
+
+#ifdef CONFIG_LOCAL_TIMERS
+	test_for_ltirq r0, r6, r5, lr
+	movne	r0, sp
+	adrne	lr, BSYM(1b)
+	bne	do_local_timer
+#endif
+#endif
+9997:
+	.endm
+
+	.macro	arch_irq_handler, symbol_name
+	.align	5
+	.global \symbol_name
+\symbol_name:
+	mov	r4, lr
+	arch_irq_handler_default
+	mov	pc, r4
+	.endm
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
index 2242ce22ec6c..d493d0b742a1 100644
--- a/arch/arm/include/asm/fiq.h
+++ b/arch/arm/include/asm/fiq.h
@@ -4,6 +4,13 @@
  * Support for FIQ on ARM architectures.
  * Written by Philip Blundell <philb@gnu.org>, 1998
  * Re-written by Russell King
+ *
+ * NOTE: The FIQ mode registers are not magically preserved across
+ * suspend/resume.
+ *
+ * Drivers which require these registers to be preserved across power
+ * management operations must implement appropriate suspend/resume handlers to
+ * save and restore them.
  */
 
 #ifndef __ASM_FIQ_H
@@ -29,9 +36,21 @@ struct fiq_handler {
 extern int claim_fiq(struct fiq_handler *f);
 extern void release_fiq(struct fiq_handler *f);
 extern void set_fiq_handler(void *start, unsigned int length);
-extern void set_fiq_regs(struct pt_regs *regs);
-extern void get_fiq_regs(struct pt_regs *regs);
 extern void enable_fiq(int fiq);
 extern void disable_fiq(int fiq);
 
+/* helpers defined in fiqasm.S: */
+extern void __set_fiq_regs(unsigned long const *regs);
+extern void __get_fiq_regs(unsigned long *regs);
+
+static inline void set_fiq_regs(struct pt_regs const *regs)
+{
+	__set_fiq_regs(&regs->ARM_r8);
+}
+
+static inline void get_fiq_regs(struct pt_regs *regs)
+{
+	__get_fiq_regs(&regs->ARM_r8);
+}
+
 #endif
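A minimal sketch of the driver obligation spelled out in the new header
comment: snapshot the banked FIQ registers before suspend and reinstate
them on resume. Only get_fiq_regs()/set_fiq_regs() come from this header;
the surrounding names are invented:

    static struct pt_regs fiq_regs_shadow;

    static int my_fiq_user_suspend(void)
    {
            get_fiq_regs(&fiq_regs_shadow);   /* save r8_fiq..r13_fiq */
            return 0;
    }

    static int my_fiq_user_resume(void)
    {
            set_fiq_regs(&fiq_regs_shadow);   /* restore after power-down */
            return 0;
    }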
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
new file mode 100644
index 000000000000..de5354746924
--- /dev/null
+++ b/arch/arm/include/asm/fncpy.h
@@ -0,0 +1,94 @@
1/*
2 * arch/arm/include/asm/fncpy.h - helper macros for function body copying
3 *
4 * Copyright (C) 2011 Linaro Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20/*
21 * These macros are intended for use when there is a need to copy a low-level
22 * function body into special memory.
23 *
24 * For example, when reconfiguring the SDRAM controller, the code doing the
25 * reconfiguration may need to run from SRAM.
26 *
27 * NOTE: that the copied function body must be entirely self-contained and
28 * position-independent in order for this to work properly.
29 *
30 * NOTE: in order for embedded literals and data to get referenced correctly,
31 * the alignment of functions must be preserved when copying. To ensure this,
32 * the source and destination addresses for fncpy() must be aligned to a
33 * multiple of 8 bytes: you will be get a BUG() if this condition is not met.
34 * You will typically need a ".align 3" directive in the assembler where the
35 * function to be copied is defined, and ensure that your allocator for the
36 * destination buffer returns 8-byte-aligned pointers.
37 *
38 * Typical usage example:
39 *
40 * extern int f(args);
41 * extern uint32_t size_of_f;
42 * int (*copied_f)(args);
43 * void *sram_buffer;
44 *
45 * copied_f = fncpy(sram_buffer, &f, size_of_f);
46 *
47 * ... later, call the function: ...
48 *
49 * copied_f(args);
50 *
51 * The size of the function to be copied can't be determined from C:
52 * this must be determined by other means, such as adding assmbler directives
53 * in the file where f is defined.
54 */
55
56#ifndef __ASM_FNCPY_H
57#define __ASM_FNCPY_H
58
59#include <linux/types.h>
60#include <linux/string.h>
61
62#include <asm/bug.h>
63#include <asm/cacheflush.h>
64
65/*
66 * Minimum alignment requirement for the source and destination addresses
67 * for function copying.
68 */
69#define FNCPY_ALIGN 8
70
71#define fncpy(dest_buf, funcp, size) ({ \
72 uintptr_t __funcp_address; \
73 typeof(funcp) __result; \
74 \
75 asm("" : "=r" (__funcp_address) : "0" (funcp)); \
76 \
77 /* \
78 * Ensure alignment of source and destination addresses, \
79 * disregarding the function's Thumb bit: \
80 */ \
81 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
82 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
83 \
84 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
85 flush_icache_range((unsigned long)(dest_buf), \
86 (unsigned long)(dest_buf) + (size)); \
87 \
88 asm("" : "=r" (__result) \
89 : "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \
90 \
91 __result; \
92})
93
94#endif /* !__ASM_FNCPY_H */
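
For illustration, a minimal usage sketch of the fncpy() macro above, following the header comment; the routine name, size symbol, and SRAM buffer are hypothetical, not part of this patch:

    #include <asm/fncpy.h>

    /* Self-contained, position-independent routine, .align 3'd in its .S file */
    extern int sram_reconfigure(unsigned long param);
    extern const u32 sram_reconfigure_sz;   /* emitted next to the routine */

    static int run_from_sram(void *sram_buf, unsigned long param)
    {
        int (*fn)(unsigned long);

        /* sram_buf must be 8-byte aligned, or the BUG_ON above fires */
        fn = fncpy(sram_buf, &sram_reconfigure, sram_reconfigure_sz);
        return fn(param);
    }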
diff --git a/arch/arm/include/asm/fpstate.h b/arch/arm/include/asm/fpstate.h
index ee5e03efc1bb..3ad4c10d0d84 100644
--- a/arch/arm/include/asm/fpstate.h
+++ b/arch/arm/include/asm/fpstate.h
@@ -18,7 +18,7 @@
18 * VFP storage area has: 18 * VFP storage area has:
19 * - FPEXC, FPSCR, FPINST and FPINST2. 19 * - FPEXC, FPSCR, FPINST and FPINST2.
20 * - 16 or 32 double precision data registers 20 * - 16 or 32 double precision data registers
21 * - an implementation-dependant word of state for FLDMX/FSTMX (pre-ARMv6) 21 * - an implementation-dependent word of state for FLDMX/FSTMX (pre-ARMv6)
22 * 22 *
23 * FPEXC will always be non-zero once the VFP has been used in this process. 23 * FPEXC will always be non-zero once the VFP has been used in this process.
24 */ 24 */
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 103f7ee97313..f89515adac60 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -2,12 +2,30 @@
2#define _ASM_ARM_FTRACE 2#define _ASM_ARM_FTRACE
3 3
4#ifdef CONFIG_FUNCTION_TRACER 4#ifdef CONFIG_FUNCTION_TRACER
5#define MCOUNT_ADDR ((long)(mcount)) 5#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
6#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 6#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
7 7
8#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
9extern void mcount(void); 9extern void mcount(void);
10extern void __gnu_mcount_nc(void); 10extern void __gnu_mcount_nc(void);
11
12#ifdef CONFIG_DYNAMIC_FTRACE
13struct dyn_arch_ftrace {
14#ifdef CONFIG_OLD_MCOUNT
15 bool old_mcount;
16#endif
17};
18
19static inline unsigned long ftrace_call_adjust(unsigned long addr)
20{
21 /* With Thumb-2, the recorded addresses have the lsb set */
22 return addr & ~1;
23}
24
25extern void ftrace_caller_old(void);
26extern void ftrace_call_old(void);
27#endif
28
11#endif 29#endif
12 30
13#endif 31#endif
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 540a044153a5..8c73900da9ed 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,38 +3,115 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#ifdef CONFIG_SMP 6#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
7 7/* ARM doesn't provide unprivileged exclusive memory accessors */
8#include <asm-generic/futex.h> 8#include <asm-generic/futex.h>
9 9#else
10#else /* !SMP, we can work around lack of atomic ops by disabling preemption */
11 10
12#include <linux/futex.h> 11#include <linux/futex.h>
13#include <linux/preempt.h>
14#include <linux/uaccess.h> 12#include <linux/uaccess.h>
15#include <asm/errno.h> 13#include <asm/errno.h>
16 14
17#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 15#define __futex_atomic_ex_table(err_reg) \
18 __asm__ __volatile__( \
19 "1: ldrt %1, [%2]\n" \
20 " " insn "\n" \
21 "2: strt %0, [%2]\n" \
22 " mov %0, #0\n" \
23 "3:\n" \ 16 "3:\n" \
24 " .pushsection __ex_table,\"a\"\n" \ 17 " .pushsection __ex_table,\"a\"\n" \
25 " .align 3\n" \ 18 " .align 3\n" \
26 " .long 1b, 4f, 2b, 4f\n" \ 19 " .long 1b, 4f, 2b, 4f\n" \
27 " .popsection\n" \ 20 " .popsection\n" \
28 " .pushsection .fixup,\"ax\"\n" \ 21 " .pushsection .fixup,\"ax\"\n" \
29 "4: mov %0, %4\n" \ 22 "4: mov %0, " err_reg "\n" \
30 " b 3b\n" \ 23 " b 3b\n" \
31 " .popsection" \ 24 " .popsection"
25
26#ifdef CONFIG_SMP
27
28#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
29 smp_mb(); \
30 __asm__ __volatile__( \
31 "1: ldrex %1, [%2]\n" \
32 " " insn "\n" \
33 "2: strex %1, %0, [%2]\n" \
34 " teq %1, #0\n" \
35 " bne 1b\n" \
36 " mov %0, #0\n" \
37 __futex_atomic_ex_table("%4") \
32 : "=&r" (ret), "=&r" (oldval) \ 38 : "=&r" (ret), "=&r" (oldval) \
33 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ 39 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
34 : "cc", "memory") 40 : "cc", "memory")
35 41
36static inline int 42static inline int
37futex_atomic_op_inuser (int encoded_op, int __user *uaddr) 43futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
44 u32 oldval, u32 newval)
45{
46 int ret;
47 u32 val;
48
49 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
50 return -EFAULT;
51
52 smp_mb();
53 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
54 "1: ldrex %1, [%4]\n"
55 " teq %1, %2\n"
56 " ite eq @ explicit IT needed for the 2b label\n"
57 "2: strexeq %0, %3, [%4]\n"
58 " movne %0, #0\n"
59 " teq %0, #0\n"
60 " bne 1b\n"
61 __futex_atomic_ex_table("%5")
62 : "=&r" (ret), "=&r" (val)
63 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
64 : "cc", "memory");
65 smp_mb();
66
67 *uval = val;
68 return ret;
69}
70
71#else /* !SMP, we can work around lack of atomic ops by disabling preemption */
72
73#include <linux/preempt.h>
74#include <asm/domain.h>
75
76#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
77 __asm__ __volatile__( \
78 "1: " T(ldr) " %1, [%2]\n" \
79 " " insn "\n" \
80 "2: " T(str) " %0, [%2]\n" \
81 " mov %0, #0\n" \
82 __futex_atomic_ex_table("%4") \
83 : "=&r" (ret), "=&r" (oldval) \
84 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
85 : "cc", "memory")
86
87static inline int
88futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
89 u32 oldval, u32 newval)
90{
91 int ret = 0;
92 u32 val;
93
94 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
95 return -EFAULT;
96
97 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
98 "1: " T(ldr) " %1, [%4]\n"
99 " teq %1, %2\n"
100 " it eq @ explicit IT needed for the 2b label\n"
101 "2: " T(streq) " %3, [%4]\n"
102 __futex_atomic_ex_table("%5")
103 : "+r" (ret), "=&r" (val)
104 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
105 : "cc", "memory");
106
107 *uval = val;
108 return ret;
109}
110
111#endif /* !SMP */
112
113static inline int
114futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
38{ 115{
39 int op = (encoded_op >> 28) & 7; 116 int op = (encoded_op >> 28) & 7;
40 int cmp = (encoded_op >> 24) & 15; 117 int cmp = (encoded_op >> 24) & 15;
@@ -45,7 +122,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
45 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 122 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
46 oparg = 1 << oparg; 123 oparg = 1 << oparg;
47 124
48 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 125 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
49 return -EFAULT; 126 return -EFAULT;
50 127
51 pagefault_disable(); /* implies preempt_disable() */ 128 pagefault_disable(); /* implies preempt_disable() */
@@ -86,40 +163,6 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
86 return ret; 163 return ret;
87} 164}
88 165
89static inline int 166#endif /* !(CPU_USE_DOMAINS && SMP) */
90futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
91{
92 int val;
93
94 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
95 return -EFAULT;
96
97 pagefault_disable(); /* implies preempt_disable() */
98
99 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
100 "1: ldrt %0, [%3]\n"
101 " teq %0, %1\n"
102 " it eq @ explicit IT needed for the 2b label\n"
103 "2: streqt %2, [%3]\n"
104 "3:\n"
105 " .pushsection __ex_table,\"a\"\n"
106 " .align 3\n"
107 " .long 1b, 4f, 2b, 4f\n"
108 " .popsection\n"
109 " .pushsection .fixup,\"ax\"\n"
110 "4: mov %0, %4\n"
111 " b 3b\n"
112 " .popsection"
113 : "=&r" (val)
114 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
115 : "cc", "memory");
116
117 pagefault_enable(); /* subsumes preempt_enable() */
118
119 return val;
120}
121
122#endif /* !SMP */
123
124#endif /* __KERNEL__ */ 167#endif /* __KERNEL__ */
125#endif /* _ASM_ARM_FUTEX_H */ 168#endif /* _ASM_ARM_FUTEX_H */
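
For reference, the field layout that futex_atomic_op_inuser() decodes follows the FUTEX_OP() packing from <linux/futex.h>; the sign-extending shifts below are the standard decode, shown only to clarify the shifts in the hunk above:

    /* encoded_op layout: 31-28 op, 27-24 cmp, 23-12 oparg, 11-0 cmparg */
    int op     = (encoded_op >> 28) & 7;     /* OPARG_SHIFT flag checked separately */
    int cmp    = (encoded_op >> 24) & 15;
    int oparg  = (encoded_op << 8) >> 20;    /* sign-extend bits 23-12 */
    int cmparg = (encoded_op << 20) >> 20;   /* sign-extend bits 11-0 */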
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
new file mode 100644
index 000000000000..7e30874377e6
--- /dev/null
+++ b/arch/arm/include/asm/glue-cache.h
@@ -0,0 +1,146 @@
1/*
2 * arch/arm/include/asm/glue-cache.h
3 *
4 * Copyright (C) 1999-2002 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef ASM_GLUE_CACHE_H
11#define ASM_GLUE_CACHE_H
12
13#include <asm/glue.h>
14
15/*
16 * Cache Model
17 * ===========
18 */
19#undef _CACHE
20#undef MULTI_CACHE
21
22#if defined(CONFIG_CPU_CACHE_V3)
23# ifdef _CACHE
24# define MULTI_CACHE 1
25# else
26# define _CACHE v3
27# endif
28#endif
29
30#if defined(CONFIG_CPU_CACHE_V4)
31# ifdef _CACHE
32# define MULTI_CACHE 1
33# else
34# define _CACHE v4
35# endif
36#endif
37
38#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
39 defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
40 defined(CONFIG_CPU_ARM1026)
41# define MULTI_CACHE 1
42#endif
43
44#if defined(CONFIG_CPU_FA526)
45# ifdef _CACHE
46# define MULTI_CACHE 1
47# else
48# define _CACHE fa
49# endif
50#endif
51
52#if defined(CONFIG_CPU_ARM926T)
53# ifdef _CACHE
54# define MULTI_CACHE 1
55# else
56# define _CACHE arm926
57# endif
58#endif
59
60#if defined(CONFIG_CPU_ARM940T)
61# ifdef _CACHE
62# define MULTI_CACHE 1
63# else
64# define _CACHE arm940
65# endif
66#endif
67
68#if defined(CONFIG_CPU_ARM946E)
69# ifdef _CACHE
70# define MULTI_CACHE 1
71# else
72# define _CACHE arm946
73# endif
74#endif
75
76#if defined(CONFIG_CPU_CACHE_V4WB)
77# ifdef _CACHE
78# define MULTI_CACHE 1
79# else
80# define _CACHE v4wb
81# endif
82#endif
83
84#if defined(CONFIG_CPU_XSCALE)
85# ifdef _CACHE
86# define MULTI_CACHE 1
87# else
88# define _CACHE xscale
89# endif
90#endif
91
92#if defined(CONFIG_CPU_XSC3)
93# ifdef _CACHE
94# define MULTI_CACHE 1
95# else
96# define _CACHE xsc3
97# endif
98#endif
99
100#if defined(CONFIG_CPU_MOHAWK)
101# ifdef _CACHE
102# define MULTI_CACHE 1
103# else
104# define _CACHE mohawk
105# endif
106#endif
107
108#if defined(CONFIG_CPU_FEROCEON)
109# define MULTI_CACHE 1
110#endif
111
112#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
113//# ifdef _CACHE
114# define MULTI_CACHE 1
115//# else
116//# define _CACHE v6
117//# endif
118#endif
119
120#if defined(CONFIG_CPU_V7)
121//# ifdef _CACHE
122# define MULTI_CACHE 1
123//# else
124//# define _CACHE v7
125//# endif
126#endif
127
128#if !defined(_CACHE) && !defined(MULTI_CACHE)
129#error Unknown cache maintenance model
130#endif
131
132#ifndef MULTI_CACHE
133#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
134#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
135#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
136#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
137#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
138#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
139#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
140
141#define dmac_map_area __glue(_CACHE,_dma_map_area)
142#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
143#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
144#endif
145
146#endif
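
The single-model case resolves to direct calls at compile time. A worked expansion, assuming a build with only CONFIG_CPU_XSCALE selected (so _CACHE is xscale and MULTI_CACHE stays undefined):

    __cpuc_flush_kern_all
        -> __glue(xscale, _flush_kern_cache_all)
        -> xscale_flush_kern_cache_all        /* direct branch, no indirection */

With more than one cache model configured, MULTI_CACHE is defined instead and the same names resolve through the cpu_cache function-pointer table in cacheflush.h. The new glue-df.h, glue-pf.h, and glue-proc.h headers below apply the identical pattern to data aborts, prefetch aborts, and processor functions.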
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
new file mode 100644
index 000000000000..354d571e8bcc
--- /dev/null
+++ b/arch/arm/include/asm/glue-df.h
@@ -0,0 +1,110 @@
1/*
2 * arch/arm/include/asm/glue-df.h
3 *
4 * Copyright (C) 1997-1999 Russell King
5 * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef ASM_GLUE_DF_H
12#define ASM_GLUE_DF_H
13
14#include <asm/glue.h>
15
16/*
17 * Data Abort Model
18 * ================
19 *
20 * We have the following to choose from:
21 * arm6 - ARM6 style
22 * arm7 - ARM7 style
23 * v4_early - ARMv4 without Thumb early abort handler
24 * v4t_late - ARMv4 with Thumb late abort handler
25 * v4t_early - ARMv4 with Thumb early abort handler
26 * v5tej_early - ARMv5 with Thumb and Java early abort handler
27 * xscale - ARMv5 with Thumb with Xscale extensions
28 * v6_early - ARMv6 generic early abort handler
29 * v7_early - ARMv7 generic early abort handler
30 */
31#undef CPU_DABORT_HANDLER
32#undef MULTI_DABORT
33
34#if defined(CONFIG_CPU_ARM610)
35# ifdef CPU_DABORT_HANDLER
36# define MULTI_DABORT 1
37# else
38# define CPU_DABORT_HANDLER cpu_arm6_data_abort
39# endif
40#endif
41
42#if defined(CONFIG_CPU_ARM710)
43# ifdef CPU_DABORT_HANDLER
44# define MULTI_DABORT 1
45# else
46# define CPU_DABORT_HANDLER cpu_arm7_data_abort
47# endif
48#endif
49
50#ifdef CONFIG_CPU_ABRT_LV4T
51# ifdef CPU_DABORT_HANDLER
52# define MULTI_DABORT 1
53# else
54# define CPU_DABORT_HANDLER v4t_late_abort
55# endif
56#endif
57
58#ifdef CONFIG_CPU_ABRT_EV4
59# ifdef CPU_DABORT_HANDLER
60# define MULTI_DABORT 1
61# else
62# define CPU_DABORT_HANDLER v4_early_abort
63# endif
64#endif
65
66#ifdef CONFIG_CPU_ABRT_EV4T
67# ifdef CPU_DABORT_HANDLER
68# define MULTI_DABORT 1
69# else
70# define CPU_DABORT_HANDLER v4t_early_abort
71# endif
72#endif
73
74#ifdef CONFIG_CPU_ABRT_EV5TJ
75# ifdef CPU_DABORT_HANDLER
76# define MULTI_DABORT 1
77# else
78# define CPU_DABORT_HANDLER v5tj_early_abort
79# endif
80#endif
81
82#ifdef CONFIG_CPU_ABRT_EV5T
83# ifdef CPU_DABORT_HANDLER
84# define MULTI_DABORT 1
85# else
86# define CPU_DABORT_HANDLER v5t_early_abort
87# endif
88#endif
89
90#ifdef CONFIG_CPU_ABRT_EV6
91# ifdef CPU_DABORT_HANDLER
92# define MULTI_DABORT 1
93# else
94# define CPU_DABORT_HANDLER v6_early_abort
95# endif
96#endif
97
98#ifdef CONFIG_CPU_ABRT_EV7
99# ifdef CPU_DABORT_HANDLER
100# define MULTI_DABORT 1
101# else
102# define CPU_DABORT_HANDLER v7_early_abort
103# endif
104#endif
105
106#ifndef CPU_DABORT_HANDLER
107#error Unknown data abort handler type
108#endif
109
110#endif
diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h
new file mode 100644
index 000000000000..d385f37c13f0
--- /dev/null
+++ b/arch/arm/include/asm/glue-pf.h
@@ -0,0 +1,57 @@
1/*
2 * arch/arm/include/asm/glue-pf.h
3 *
4 * Copyright (C) 1997-1999 Russell King
5 * Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef ASM_GLUE_PF_H
12#define ASM_GLUE_PF_H
13
14#include <asm/glue.h>
15
16/*
17 * Prefetch Abort Model
18 * ====================
19 *
20 * We have the following to choose from:
21 * legacy - no IFSR, no IFAR
22 * v6 - ARMv6: IFSR, no IFAR
23 * v7 - ARMv7: IFSR and IFAR
24 */
25
26#undef CPU_PABORT_HANDLER
27#undef MULTI_PABORT
28
29#ifdef CONFIG_CPU_PABRT_LEGACY
30# ifdef CPU_PABORT_HANDLER
31# define MULTI_PABORT 1
32# else
33# define CPU_PABORT_HANDLER legacy_pabort
34# endif
35#endif
36
37#ifdef CONFIG_CPU_PABRT_V6
38# ifdef CPU_PABORT_HANDLER
39# define MULTI_PABORT 1
40# else
41# define CPU_PABORT_HANDLER v6_pabort
42# endif
43#endif
44
45#ifdef CONFIG_CPU_PABRT_V7
46# ifdef CPU_PABORT_HANDLER
47# define MULTI_PABORT 1
48# else
49# define CPU_PABORT_HANDLER v7_pabort
50# endif
51#endif
52
53#ifndef CPU_PABORT_HANDLER
54#error Unknown prefetch abort handler type
55#endif
56
57#endif
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
new file mode 100644
index 000000000000..e2be7f142668
--- /dev/null
+++ b/arch/arm/include/asm/glue-proc.h
@@ -0,0 +1,264 @@
1/*
2 * arch/arm/include/asm/glue-proc.h
3 *
4 * Copyright (C) 1997-1999 Russell King
5 * Copyright (C) 2000 Deep Blue Solutions Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef ASM_GLUE_PROC_H
12#define ASM_GLUE_PROC_H
13
14#include <asm/glue.h>
15
16/*
17 * Work out if we need multiple CPU support
18 */
19#undef MULTI_CPU
20#undef CPU_NAME
21
22/*
23 * CPU_NAME - the prefix for CPU related functions
24 */
25
26#ifdef CONFIG_CPU_ARM610
27# ifdef CPU_NAME
28# undef MULTI_CPU
29# define MULTI_CPU
30# else
31# define CPU_NAME cpu_arm6
32# endif
33#endif
34
35#ifdef CONFIG_CPU_ARM7TDMI
36# ifdef CPU_NAME
37# undef MULTI_CPU
38# define MULTI_CPU
39# else
40# define CPU_NAME cpu_arm7tdmi
41# endif
42#endif
43
44#ifdef CONFIG_CPU_ARM710
45# ifdef CPU_NAME
46# undef MULTI_CPU
47# define MULTI_CPU
48# else
49# define CPU_NAME cpu_arm7
50# endif
51#endif
52
53#ifdef CONFIG_CPU_ARM720T
54# ifdef CPU_NAME
55# undef MULTI_CPU
56# define MULTI_CPU
57# else
58# define CPU_NAME cpu_arm720
59# endif
60#endif
61
62#ifdef CONFIG_CPU_ARM740T
63# ifdef CPU_NAME
64# undef MULTI_CPU
65# define MULTI_CPU
66# else
67# define CPU_NAME cpu_arm740
68# endif
69#endif
70
71#ifdef CONFIG_CPU_ARM9TDMI
72# ifdef CPU_NAME
73# undef MULTI_CPU
74# define MULTI_CPU
75# else
76# define CPU_NAME cpu_arm9tdmi
77# endif
78#endif
79
80#ifdef CONFIG_CPU_ARM920T
81# ifdef CPU_NAME
82# undef MULTI_CPU
83# define MULTI_CPU
84# else
85# define CPU_NAME cpu_arm920
86# endif
87#endif
88
89#ifdef CONFIG_CPU_ARM922T
90# ifdef CPU_NAME
91# undef MULTI_CPU
92# define MULTI_CPU
93# else
94# define CPU_NAME cpu_arm922
95# endif
96#endif
97
98#ifdef CONFIG_CPU_FA526
99# ifdef CPU_NAME
100# undef MULTI_CPU
101# define MULTI_CPU
102# else
103# define CPU_NAME cpu_fa526
104# endif
105#endif
106
107#ifdef CONFIG_CPU_ARM925T
108# ifdef CPU_NAME
109# undef MULTI_CPU
110# define MULTI_CPU
111# else
112# define CPU_NAME cpu_arm925
113# endif
114#endif
115
116#ifdef CONFIG_CPU_ARM926T
117# ifdef CPU_NAME
118# undef MULTI_CPU
119# define MULTI_CPU
120# else
121# define CPU_NAME cpu_arm926
122# endif
123#endif
124
125#ifdef CONFIG_CPU_ARM940T
126# ifdef CPU_NAME
127# undef MULTI_CPU
128# define MULTI_CPU
129# else
130# define CPU_NAME cpu_arm940
131# endif
132#endif
133
134#ifdef CONFIG_CPU_ARM946E
135# ifdef CPU_NAME
136# undef MULTI_CPU
137# define MULTI_CPU
138# else
139# define CPU_NAME cpu_arm946
140# endif
141#endif
142
143#ifdef CONFIG_CPU_SA110
144# ifdef CPU_NAME
145# undef MULTI_CPU
146# define MULTI_CPU
147# else
148# define CPU_NAME cpu_sa110
149# endif
150#endif
151
152#ifdef CONFIG_CPU_SA1100
153# ifdef CPU_NAME
154# undef MULTI_CPU
155# define MULTI_CPU
156# else
157# define CPU_NAME cpu_sa1100
158# endif
159#endif
160
161#ifdef CONFIG_CPU_ARM1020
162# ifdef CPU_NAME
163# undef MULTI_CPU
164# define MULTI_CPU
165# else
166# define CPU_NAME cpu_arm1020
167# endif
168#endif
169
170#ifdef CONFIG_CPU_ARM1020E
171# ifdef CPU_NAME
172# undef MULTI_CPU
173# define MULTI_CPU
174# else
175# define CPU_NAME cpu_arm1020e
176# endif
177#endif
178
179#ifdef CONFIG_CPU_ARM1022
180# ifdef CPU_NAME
181# undef MULTI_CPU
182# define MULTI_CPU
183# else
184# define CPU_NAME cpu_arm1022
185# endif
186#endif
187
188#ifdef CONFIG_CPU_ARM1026
189# ifdef CPU_NAME
190# undef MULTI_CPU
191# define MULTI_CPU
192# else
193# define CPU_NAME cpu_arm1026
194# endif
195#endif
196
197#ifdef CONFIG_CPU_XSCALE
198# ifdef CPU_NAME
199# undef MULTI_CPU
200# define MULTI_CPU
201# else
202# define CPU_NAME cpu_xscale
203# endif
204#endif
205
206#ifdef CONFIG_CPU_XSC3
207# ifdef CPU_NAME
208# undef MULTI_CPU
209# define MULTI_CPU
210# else
211# define CPU_NAME cpu_xsc3
212# endif
213#endif
214
215#ifdef CONFIG_CPU_MOHAWK
216# ifdef CPU_NAME
217# undef MULTI_CPU
218# define MULTI_CPU
219# else
220# define CPU_NAME cpu_mohawk
221# endif
222#endif
223
224#ifdef CONFIG_CPU_FEROCEON
225# ifdef CPU_NAME
226# undef MULTI_CPU
227# define MULTI_CPU
228# else
229# define CPU_NAME cpu_feroceon
230# endif
231#endif
232
233#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
234# ifdef CPU_NAME
235# undef MULTI_CPU
236# define MULTI_CPU
237# else
238# define CPU_NAME cpu_v6
239# endif
240#endif
241
242#ifdef CONFIG_CPU_V7
243# ifdef CPU_NAME
244# undef MULTI_CPU
245# define MULTI_CPU
246# else
247# define CPU_NAME cpu_v7
248# endif
249#endif
250
251#ifndef MULTI_CPU
252#define cpu_proc_init __glue(CPU_NAME,_proc_init)
253#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
254#define cpu_reset __glue(CPU_NAME,_reset)
255#define cpu_do_idle __glue(CPU_NAME,_do_idle)
256#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area)
257#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm)
258#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext)
259#define cpu_suspend_size __glue(CPU_NAME,_suspend_size)
260#define cpu_do_suspend __glue(CPU_NAME,_do_suspend)
261#define cpu_do_resume __glue(CPU_NAME,_do_resume)
262#endif
263
264#endif
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
index 234a3fc1c78e..fbf71d75ec83 100644
--- a/arch/arm/include/asm/glue.h
+++ b/arch/arm/include/asm/glue.h
@@ -10,12 +10,11 @@
10 * 10 *
11 * This file provides the glue to stick the processor-specific bits 11 * This file provides the glue to stick the processor-specific bits
12 * into the kernel in an efficient manner. The idea is to use branches 12 * into the kernel in an efficient manner. The idea is to use branches
13 * when we're only targetting one class of TLB, or indirect calls 13 * when we're only targeting one class of TLB, or indirect calls
14 * when we're targetting multiple classes of TLBs. 14 * when we're targeting multiple classes of TLBs.
15 */ 15 */
16#ifdef __KERNEL__ 16#ifdef __KERNEL__
17 17
18
19#ifdef __STDC__ 18#ifdef __STDC__
20#define ____glue(name,fn) name##fn 19#define ____glue(name,fn) name##fn
21#else 20#else
@@ -23,141 +22,4 @@
23#endif 22#endif
24#define __glue(name,fn) ____glue(name,fn) 23#define __glue(name,fn) ____glue(name,fn)
25 24
26
27
28/*
29 * Data Abort Model
30 * ================
31 *
32 * We have the following to choose from:
33 * arm6 - ARM6 style
34 * arm7 - ARM7 style
35 * v4_early - ARMv4 without Thumb early abort handler
36 * v4t_late - ARMv4 with Thumb late abort handler
37 * v4t_early - ARMv4 with Thumb early abort handler
38 * v5tej_early - ARMv5 with Thumb and Java early abort handler
39 * xscale - ARMv5 with Thumb with Xscale extensions
40 * v6_early - ARMv6 generic early abort handler
41 * v7_early - ARMv7 generic early abort handler
42 */
43#undef CPU_DABORT_HANDLER
44#undef MULTI_DABORT
45
46#if defined(CONFIG_CPU_ARM610)
47# ifdef CPU_DABORT_HANDLER
48# define MULTI_DABORT 1
49# else
50# define CPU_DABORT_HANDLER cpu_arm6_data_abort
51# endif
52#endif
53
54#if defined(CONFIG_CPU_ARM710)
55# ifdef CPU_DABORT_HANDLER
56# define MULTI_DABORT 1
57# else
58# define CPU_DABORT_HANDLER cpu_arm7_data_abort
59# endif
60#endif
61
62#ifdef CONFIG_CPU_ABRT_LV4T
63# ifdef CPU_DABORT_HANDLER
64# define MULTI_DABORT 1
65# else
66# define CPU_DABORT_HANDLER v4t_late_abort
67# endif
68#endif
69
70#ifdef CONFIG_CPU_ABRT_EV4
71# ifdef CPU_DABORT_HANDLER
72# define MULTI_DABORT 1
73# else
74# define CPU_DABORT_HANDLER v4_early_abort
75# endif
76#endif
77
78#ifdef CONFIG_CPU_ABRT_EV4T
79# ifdef CPU_DABORT_HANDLER
80# define MULTI_DABORT 1
81# else
82# define CPU_DABORT_HANDLER v4t_early_abort
83# endif
84#endif
85
86#ifdef CONFIG_CPU_ABRT_EV5TJ
87# ifdef CPU_DABORT_HANDLER
88# define MULTI_DABORT 1
89# else
90# define CPU_DABORT_HANDLER v5tj_early_abort
91# endif
92#endif
93
94#ifdef CONFIG_CPU_ABRT_EV5T
95# ifdef CPU_DABORT_HANDLER
96# define MULTI_DABORT 1
97# else
98# define CPU_DABORT_HANDLER v5t_early_abort
99# endif
100#endif
101
102#ifdef CONFIG_CPU_ABRT_EV6
103# ifdef CPU_DABORT_HANDLER
104# define MULTI_DABORT 1
105# else
106# define CPU_DABORT_HANDLER v6_early_abort
107# endif
108#endif
109
110#ifdef CONFIG_CPU_ABRT_EV7
111# ifdef CPU_DABORT_HANDLER
112# define MULTI_DABORT 1
113# else
114# define CPU_DABORT_HANDLER v7_early_abort
115# endif
116#endif
117
118#ifndef CPU_DABORT_HANDLER
119#error Unknown data abort handler type
120#endif
121
122/*
123 * Prefetch Abort Model
124 * ================
125 *
126 * We have the following to choose from:
127 * legacy - no IFSR, no IFAR
128 * v6 - ARMv6: IFSR, no IFAR
129 * v7 - ARMv7: IFSR and IFAR
130 */
131
132#undef CPU_PABORT_HANDLER
133#undef MULTI_PABORT
134
135#ifdef CONFIG_CPU_PABRT_LEGACY
136# ifdef CPU_PABORT_HANDLER
137# define MULTI_PABORT 1
138# else
139# define CPU_PABORT_HANDLER legacy_pabort
140# endif
141#endif
142
143#ifdef CONFIG_CPU_PABRT_V6
144# ifdef CPU_PABORT_HANDLER
145# define MULTI_PABORT 1
146# else
147# define CPU_PABORT_HANDLER v6_pabort
148# endif
149#endif
150
151#ifdef CONFIG_CPU_PABRT_V7
152# ifdef CPU_PABORT_HANDLER
153# define MULTI_PABORT 1
154# else
155# define CPU_PABORT_HANDLER v7_pabort
156# endif
157#endif
158
159#ifndef CPU_PABORT_HANDLER
160#error Unknown prefetch abort handler type
161#endif
162
163#endif 25#endif
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 6d7485aff955..89ad1805e579 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,13 +5,31 @@
5#include <linux/threads.h> 5#include <linux/threads.h>
6#include <asm/irq.h> 6#include <asm/irq.h>
7 7
8#define NR_IPI 5
9
8typedef struct { 10typedef struct {
9 unsigned int __softirq_pending; 11 unsigned int __softirq_pending;
12#ifdef CONFIG_LOCAL_TIMERS
10 unsigned int local_timer_irqs; 13 unsigned int local_timer_irqs;
14#endif
15#ifdef CONFIG_SMP
16 unsigned int ipi_irqs[NR_IPI];
17#endif
11} ____cacheline_aligned irq_cpustat_t; 18} ____cacheline_aligned irq_cpustat_t;
12 19
13#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ 20#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
14 21
22#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
23#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
24
25#ifdef CONFIG_SMP
26u64 smp_irq_stat_cpu(unsigned int cpu);
27#else
28#define smp_irq_stat_cpu(cpu) 0
29#endif
30
31#define arch_irq_stat_cpu smp_irq_stat_cpu
32
15#if NR_IRQS > 512 33#if NR_IRQS > 512
16#define HARDIRQ_BITS 10 34#define HARDIRQ_BITS 10
17#elif NR_IRQS > 256 35#elif NR_IRQS > 256
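
The new NR_IPI array and accessor macros are what the SMP code uses to account IPIs per CPU. A minimal sketch of the intended use; the handler shape and the smp_irq_stat_cpu() body are plausible implementations, not this patch's actual smp.c changes:

    /* On IPI receipt (ipinr is the 0..NR_IPI-1 message number): */
    if (ipinr < NR_IPI)
        __inc_irq_stat(smp_processor_id(), ipi_irqs[ipinr]);

    /* Summing for /proc/stat via arch_irq_stat_cpu: */
    u64 smp_irq_stat_cpu(unsigned int cpu)
    {
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
            sum += __get_irq_stat(cpu, ipi_irqs[i]);
        return sum;
    }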
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 6bcba48800fe..16bd48031583 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -21,9 +21,6 @@
21#define __ASM_ARM_HARDWARE_L2X0_H 21#define __ASM_ARM_HARDWARE_L2X0_H
22 22
23#define L2X0_CACHE_ID 0x000 23#define L2X0_CACHE_ID 0x000
24#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
25#define L2X0_CACHE_ID_PART_L210 (1 << 6)
26#define L2X0_CACHE_ID_PART_L310 (3 << 6)
27#define L2X0_CACHE_TYPE 0x004 24#define L2X0_CACHE_TYPE 0x004
28#define L2X0_CTRL 0x100 25#define L2X0_CTRL 0x100
29#define L2X0_AUX_CTRL 0x104 26#define L2X0_AUX_CTRL 0x104
@@ -39,6 +36,7 @@
39#define L2X0_RAW_INTR_STAT 0x21C 36#define L2X0_RAW_INTR_STAT 0x21C
40#define L2X0_INTR_CLEAR 0x220 37#define L2X0_INTR_CLEAR 0x220
41#define L2X0_CACHE_SYNC 0x730 38#define L2X0_CACHE_SYNC 0x730
39#define L2X0_DUMMY_REG 0x740
42#define L2X0_INV_LINE_PA 0x770 40#define L2X0_INV_LINE_PA 0x770
43#define L2X0_INV_WAY 0x77C 41#define L2X0_INV_WAY 0x77C
44#define L2X0_CLEAN_LINE_PA 0x7B0 42#define L2X0_CLEAN_LINE_PA 0x7B0
@@ -53,6 +51,26 @@
53#define L2X0_LINE_DATA 0xF10 51#define L2X0_LINE_DATA 0xF10
54#define L2X0_LINE_TAG 0xF30 52#define L2X0_LINE_TAG 0xF30
55#define L2X0_DEBUG_CTRL 0xF40 53#define L2X0_DEBUG_CTRL 0xF40
54#define L2X0_PREFETCH_CTRL 0xF60
55#define L2X0_POWER_CTRL 0xF80
56#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
57#define L2X0_STNDBY_MODE_EN (1 << 0)
58
59/* Registers shifts and masks */
60#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
61#define L2X0_CACHE_ID_PART_L210 (1 << 6)
62#define L2X0_CACHE_ID_PART_L310 (3 << 6)
63
64#define L2X0_AUX_CTRL_MASK 0xc0000fff
65#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
66#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
67#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17)
68#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
69#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
70#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
71#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28
72#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
73#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
56 74
57#ifndef __ASSEMBLY__ 75#ifndef __ASSEMBLY__
58extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); 76extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
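
The relocated ID masks and new AUX_CTRL shifts are meant for composing the aux_val/aux_mask pair passed to l2x0_init(), which ANDs the current AUX value with aux_mask and then ORs in aux_val. A hedged sketch (the base pointer and chosen field values are illustrative):

    u32 aux_val  = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |      /* way-size code 2 */
                   (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
                   (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT);
    u32 aux_mask = ~(L2X0_AUX_CTRL_WAY_SIZE_MASK |
                     (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
                     (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT));

    l2x0_init(l2x0_base, aux_val, aux_mask);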
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 212e47828c79..7ecd793b8f5a 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -21,18 +21,6 @@
21#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT) 21#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
22#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT) 22#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
23 23
24struct tracectx {
25 unsigned int etb_bufsz;
26 void __iomem *etb_regs;
27 void __iomem *etm_regs;
28 unsigned long flags;
29 int ncmppairs;
30 int etm_portsz;
31 struct device *dev;
32 struct clk *emu_clk;
33 struct mutex mutex;
34};
35
36#define TRACER_TIMEOUT 10000 24#define TRACER_TIMEOUT 10000
37 25
38#define etm_writel(t, v, x) \ 26#define etm_writel(t, v, x) \
@@ -112,10 +100,10 @@ struct tracectx {
112 100
113/* ETM status register, "ETM Architecture", 3.3.2 */ 101/* ETM status register, "ETM Architecture", 3.3.2 */
114#define ETMR_STATUS (0x10) 102#define ETMR_STATUS (0x10)
115#define ETMST_OVERFLOW (1 << 0) 103#define ETMST_OVERFLOW BIT(0)
116#define ETMST_PROGBIT (1 << 1) 104#define ETMST_PROGBIT BIT(1)
117#define ETMST_STARTSTOP (1 << 2) 105#define ETMST_STARTSTOP BIT(2)
118#define ETMST_TRIGGER (1 << 3) 106#define ETMST_TRIGGER BIT(3)
119 107
120#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT) 108#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
121#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP) 109#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
@@ -123,7 +111,7 @@ struct tracectx {
123 111
124#define ETMR_TRACEENCTRL2 0x1c 112#define ETMR_TRACEENCTRL2 0x1c
125#define ETMR_TRACEENCTRL 0x24 113#define ETMR_TRACEENCTRL 0x24
126#define ETMTE_INCLEXCL (1 << 24) 114#define ETMTE_INCLEXCL BIT(24)
127#define ETMR_TRACEENEVT 0x20 115#define ETMR_TRACEENEVT 0x20
128#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \ 116#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
129 ETMCTRL_DATA_DO_ADDR | \ 117 ETMCTRL_DATA_DO_ADDR | \
@@ -146,12 +134,12 @@ struct tracectx {
146#define ETBR_CTRL 0x20 134#define ETBR_CTRL 0x20
147#define ETBR_FORMATTERCTRL 0x304 135#define ETBR_FORMATTERCTRL 0x304
148#define ETBFF_ENFTC 1 136#define ETBFF_ENFTC 1
149#define ETBFF_ENFCONT (1 << 1) 137#define ETBFF_ENFCONT BIT(1)
150#define ETBFF_FONFLIN (1 << 4) 138#define ETBFF_FONFLIN BIT(4)
151#define ETBFF_MANUAL_FLUSH (1 << 6) 139#define ETBFF_MANUAL_FLUSH BIT(6)
152#define ETBFF_TRIGIN (1 << 8) 140#define ETBFF_TRIGIN BIT(8)
153#define ETBFF_TRIGEVT (1 << 9) 141#define ETBFF_TRIGEVT BIT(9)
154#define ETBFF_TRIGFL (1 << 10) 142#define ETBFF_TRIGFL BIT(10)
155 143
156#define etb_writel(t, v, x) \ 144#define etb_writel(t, v, x) \
157 (__raw_writel((v), (t)->etb_regs + (x))) 145 (__raw_writel((v), (t)->etb_regs + (x)))
diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S
new file mode 100644
index 000000000000..c115b82fe80a
--- /dev/null
+++ b/arch/arm/include/asm/hardware/entry-macro-gic.S
@@ -0,0 +1,75 @@
1/*
2 * arch/arm/include/asm/hardware/entry-macro-gic.S
3 *
4 * Low-level IRQ helper macros for GIC
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#include <asm/hardware/gic.h>
12
13#ifndef HAVE_GET_IRQNR_PREAMBLE
14 .macro get_irqnr_preamble, base, tmp
15 ldr \base, =gic_cpu_base_addr
16 ldr \base, [\base]
17 .endm
18#endif
19
20/*
21 * The interrupt numbering scheme is defined in the
22 * interrupt controller spec. To wit:
23 *
24 * Interrupts 0-15 are IPI
25 * 16-28 are reserved
26 * 29-31 are local. We allow 30 to be used for the watchdog.
27 * 32-1020 are global
28 * 1021-1022 are reserved
29 * 1023 is "spurious" (no interrupt)
30 *
31 * For now, we ignore all local interrupts so only return an interrupt if it's
32 * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
33 *
34 * A simple read from the controller will tell us the number of the highest
35 * priority enabled interrupt. We then just need to check whether it is in the
36 * valid range for an IRQ (30-1020 inclusive).
37 */
38
39 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
40
41 ldr \irqstat, [\base, #GIC_CPU_INTACK]
42 /* bits 12-10 = src CPU, 9-0 = int # */
43
44 ldr \tmp, =1021
45 bic \irqnr, \irqstat, #0x1c00
46 cmp \irqnr, #29
47 cmpcc \irqnr, \irqnr
48 cmpne \irqnr, \tmp
49 cmpcs \irqnr, \irqnr
50 .endm
51
52/* We assume that irqstat (the raw value of the IRQ acknowledge
53 * register) is preserved from the macro above.
54 * If there is an IPI, we immediately signal end of interrupt on the
55 * controller, since this requires the original irqstat value which
56 * we won't easily be able to recreate later.
57 */
58
59 .macro test_for_ipi, irqnr, irqstat, base, tmp
60 bic \irqnr, \irqstat, #0x1c00
61 cmp \irqnr, #16
62 strcc \irqstat, [\base, #GIC_CPU_EOI]
63 cmpcs \irqnr, \irqnr
64 .endm
65
 66/* As above, this assumes that irqstat and base are preserved. */
67
68 .macro test_for_ltirq, irqnr, irqstat, base, tmp
69 bic \irqnr, \irqstat, #0x1c00
70 mov \tmp, #0
71 cmp \irqnr, #29
72 moveq \tmp, #1
73 streq \irqstat, [\base, #GIC_CPU_EOI]
74 cmp \tmp, #0
75 .endm
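
A C rendering of what the macros above do with the acknowledged value may help; this is a sketch only, and the authoritative code is the assembler above:

    u32 irqstat = readl(gic_cpu_base_addr + GIC_CPU_INTACK);
    u32 irqnr   = irqstat & ~0x1c00;   /* drop bits 12-10 (source CPU) */

    if (irqnr >= 30 && irqnr <= 1020) {
        /* ordinary interrupt: handled via the normal IRQ path */
    } else if (irqnr < 16) {
        /* IPI: EOI immediately, since irqstat is hard to recreate later */
        writel(irqstat, gic_cpu_base_addr + GIC_CPU_EOI);
    }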
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 7f34333bb545..0691f9dcc500 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -33,10 +33,14 @@
33#define GIC_DIST_SOFTINT 0xf00 33#define GIC_DIST_SOFTINT 0xf00
34 34
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start); 36extern void __iomem *gic_cpu_base_addr;
37void gic_cpu_init(unsigned int gic_nr, void __iomem *base); 37extern struct irq_chip gic_arch_extn;
38
39void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
40void gic_secondary_init(unsigned int);
38void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 41void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
39void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); 42void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
43void gic_enable_ppi(unsigned int);
40#endif 44#endif
41 45
42#endif 46#endif
diff --git a/arch/arm/include/asm/hardware/icst.h b/arch/arm/include/asm/hardware/icst.h
index 10382a3dcec9..794220b087d2 100644
--- a/arch/arm/include/asm/hardware/icst.h
+++ b/arch/arm/include/asm/hardware/icst.h
@@ -8,7 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * Support functions for calculating clocks/divisors for the ICST 10 * Support functions for calculating clocks/divisors for the ICST
11 * clock generators. See http://www.icst.com/ for more information 11 * clock generators. See http://www.idt.com/ for more information
12 * on these devices. 12 * on these devices.
13 */ 13 */
14#ifndef ASMARM_HARDWARE_ICST_H 14#ifndef ASMARM_HARDWARE_ICST_H
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 6700c7fc7ebd..b2f95c72287c 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,7 +75,8 @@ extern unsigned long it8152_base_address;
75 IT8152_PD_IRQ(1) USB (USBR) 75 IT8152_PD_IRQ(1) USB (USBR)
76 IT8152_PD_IRQ(0) Audio controller (ACR) 76 IT8152_PD_IRQ(0) Audio controller (ACR)
77 */ 77 */
78#define IT8152_IRQ(x) (IRQ_BOARD_END + (x)) 78#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
79#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40)
79 80
80/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ 81/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
81#define IT8152_LD_IRQ_COUNT 9 82#define IT8152_LD_IRQ_COUNT 9
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
index f35b86e68dd5..e4a04e4e5627 100644
--- a/arch/arm/include/asm/hardware/pl080.h
+++ b/arch/arm/include/asm/hardware/pl080.h
@@ -16,7 +16,7 @@
 16 * make it not entirely compatible with the PL080 specification from 16 * make it not entirely compatible with the PL080 specification from
17 * ARM. When in doubt, check the Samsung documentation first. 17 * ARM. When in doubt, check the Samsung documentation first.
18 * 18 *
19 * The Samsung defines are PL080S, and add an extra controll register, 19 * The Samsung defines are PL080S, and add an extra control register,
20 * the ability to move more than 2^11 counts of data and some extra 20 * the ability to move more than 2^11 counts of data and some extra
21 * OneNAND features. 21 * OneNAND features.
22*/ 22*/
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index a101f10bb5b1..e0d1c0cfa548 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -50,8 +50,17 @@
50#define SCPCELLID2 0xFF8 50#define SCPCELLID2 0xFF8
51#define SCPCELLID3 0xFFC 51#define SCPCELLID3 0xFFC
52 52
53#define SCCTRL_TIMEREN0SEL_REFCLK (0 << 15)
54#define SCCTRL_TIMEREN0SEL_TIMCLK (1 << 15)
55
56#define SCCTRL_TIMEREN1SEL_REFCLK (0 << 17)
57#define SCCTRL_TIMEREN1SEL_TIMCLK (1 << 17)
58
53static inline void sysctl_soft_reset(void __iomem *base) 59static inline void sysctl_soft_reset(void __iomem *base)
54{ 60{
61 /* switch to slow mode */
62 writel(0x2, base + SCCTRL);
63
55 /* writing any value to SCSYSSTAT reg will reset system */ 64 /* writing any value to SCSYSSTAT reg will reset system */
56 writel(0, base + SCSYSSTAT); 65 writel(0, base + SCSYSSTAT);
57} 66}
diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h
new file mode 100644
index 000000000000..4384d81eee79
--- /dev/null
+++ b/arch/arm/include/asm/hardware/timer-sp.h
@@ -0,0 +1,2 @@
1void sp804_clocksource_init(void __iomem *, const char *);
2void sp804_clockevents_init(void __iomem *, unsigned int, const char *);
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 5aff58126602..a4edd19dd3d6 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -19,14 +19,36 @@
19 19
20extern pte_t *pkmap_page_table; 20extern pte_t *pkmap_page_table;
21 21
22#define ARCH_NEEDS_KMAP_HIGH_GET
23
24extern void *kmap_high(struct page *page); 22extern void *kmap_high(struct page *page);
25extern void *kmap_high_get(struct page *page);
26extern void kunmap_high(struct page *page); 23extern void kunmap_high(struct page *page);
27 24
28extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte); 25/*
29extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte); 26 * The reason for kmap_high_get() is to ensure that the currently kmap'd
27 * page usage count does not decrease to zero while we're using its
28 * existing virtual mapping in an atomic context. With a VIVT cache this
29 * is essential to do, but with a VIPT cache this is only an optimization
 30 * so as not to pay the price of establishing a second mapping if an existing
31 * one can be used. However, on platforms without hardware TLB maintenance
32 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
33 * the locking involved must also disable IRQs which is incompatible with
34 * the IPI mechanism used by global TLB operations.
35 */
36#define ARCH_NEEDS_KMAP_HIGH_GET
37#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
38#undef ARCH_NEEDS_KMAP_HIGH_GET
39#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
40#error "The sum of features in your kernel config cannot be supported together"
41#endif
42#endif
43
44#ifdef ARCH_NEEDS_KMAP_HIGH_GET
45extern void *kmap_high_get(struct page *page);
46#else
47static inline void *kmap_high_get(struct page *page)
48{
49 return NULL;
50}
51#endif
30 52
31/* 53/*
32 * The following functions are already defined by <linux/highmem.h> 54 * The following functions are already defined by <linux/highmem.h>
@@ -35,9 +57,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
35#ifdef CONFIG_HIGHMEM 57#ifdef CONFIG_HIGHMEM
36extern void *kmap(struct page *page); 58extern void *kmap(struct page *page);
37extern void kunmap(struct page *page); 59extern void kunmap(struct page *page);
38extern void *kmap_atomic(struct page *page, enum km_type type); 60extern void *__kmap_atomic(struct page *page);
39extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type); 61extern void __kunmap_atomic(void *kvaddr);
40extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); 62extern void *kmap_atomic_pfn(unsigned long pfn);
41extern struct page *kmap_atomic_to_page(const void *ptr); 63extern struct page *kmap_atomic_to_page(const void *ptr);
42#endif 64#endif
43 65
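
A short sketch of the pattern the new comment describes, reusing an existing kmap from atomic context instead of building a second mapping (flush_lines() is a hypothetical cache-maintenance stand-in):

    void *vaddr = kmap_high_get(page);   /* NULL if the page has no kmap */
    if (vaddr) {
        flush_lines(vaddr);              /* mapping is pinned while we use it */
        kunmap_high(page);               /* drop the extra reference */
    } else {
        vaddr = __kmap_atomic(page);
        flush_lines(vaddr);
        __kunmap_atomic(vaddr);
    }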
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000000..f389b2704d82
--- /dev/null
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -0,0 +1,133 @@
1#ifndef _ARM_HW_BREAKPOINT_H
2#define _ARM_HW_BREAKPOINT_H
3
4#ifdef __KERNEL__
5
6struct task_struct;
7
8#ifdef CONFIG_HAVE_HW_BREAKPOINT
9
10struct arch_hw_breakpoint_ctrl {
11 u32 __reserved : 9,
12 mismatch : 1,
13 : 9,
14 len : 8,
15 type : 2,
16 privilege : 2,
17 enabled : 1;
18};
19
20struct arch_hw_breakpoint {
21 u32 address;
22 u32 trigger;
23 struct arch_hw_breakpoint_ctrl step_ctrl;
24 struct arch_hw_breakpoint_ctrl ctrl;
25};
26
27static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
28{
29 return (ctrl.mismatch << 22) | (ctrl.len << 5) | (ctrl.type << 3) |
30 (ctrl.privilege << 1) | ctrl.enabled;
31}
32
33static inline void decode_ctrl_reg(u32 reg,
34 struct arch_hw_breakpoint_ctrl *ctrl)
35{
36 ctrl->enabled = reg & 0x1;
37 reg >>= 1;
38 ctrl->privilege = reg & 0x3;
39 reg >>= 2;
40 ctrl->type = reg & 0x3;
41 reg >>= 2;
42 ctrl->len = reg & 0xff;
43 reg >>= 17;
44 ctrl->mismatch = reg & 0x1;
45}
46
47/* Debug architecture numbers. */
48#define ARM_DEBUG_ARCH_RESERVED 0 /* In case of ptrace ABI updates. */
49#define ARM_DEBUG_ARCH_V6 1
50#define ARM_DEBUG_ARCH_V6_1 2
51#define ARM_DEBUG_ARCH_V7_ECP14 3
52#define ARM_DEBUG_ARCH_V7_MM 4
53
54/* Breakpoint */
55#define ARM_BREAKPOINT_EXECUTE 0
56
57/* Watchpoints */
58#define ARM_BREAKPOINT_LOAD 1
59#define ARM_BREAKPOINT_STORE 2
60
61/* Privilege Levels */
62#define ARM_BREAKPOINT_PRIV 1
63#define ARM_BREAKPOINT_USER 2
64
65/* Lengths */
66#define ARM_BREAKPOINT_LEN_1 0x1
67#define ARM_BREAKPOINT_LEN_2 0x3
68#define ARM_BREAKPOINT_LEN_4 0xf
69#define ARM_BREAKPOINT_LEN_8 0xff
70
71/* Limits */
72#define ARM_MAX_BRP 16
73#define ARM_MAX_WRP 16
74#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP)
75
76/* DSCR method of entry bits. */
77#define ARM_DSCR_MOE(x) ((x >> 2) & 0xf)
78#define ARM_ENTRY_BREAKPOINT 0x1
79#define ARM_ENTRY_ASYNC_WATCHPOINT 0x2
80#define ARM_ENTRY_SYNC_WATCHPOINT 0xa
81
82/* DSCR monitor/halting bits. */
83#define ARM_DSCR_HDBGEN (1 << 14)
84#define ARM_DSCR_MDBGEN (1 << 15)
85
86/* opcode2 numbers for the co-processor instructions. */
87#define ARM_OP2_BVR 4
88#define ARM_OP2_BCR 5
89#define ARM_OP2_WVR 6
90#define ARM_OP2_WCR 7
91
92/* Base register numbers for the debug registers. */
93#define ARM_BASE_BVR 64
94#define ARM_BASE_BCR 80
95#define ARM_BASE_WVR 96
96#define ARM_BASE_WCR 112
97
98/* Accessor macros for the debug registers. */
99#define ARM_DBG_READ(M, OP2, VAL) do {\
100 asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\
101} while (0)
102
103#define ARM_DBG_WRITE(M, OP2, VAL) do {\
104 asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\
105} while (0)
106
107struct notifier_block;
108struct perf_event;
109struct pmu;
110
111extern struct pmu perf_ops_bp;
112extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
113 int *gen_len, int *gen_type);
114extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
115extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
116extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
117 unsigned long val, void *data);
118
119extern u8 arch_get_debug_arch(void);
120extern u8 arch_get_max_wp_len(void);
121extern void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
122
123int arch_install_hw_breakpoint(struct perf_event *bp);
124void arch_uninstall_hw_breakpoint(struct perf_event *bp);
125void hw_breakpoint_pmu_read(struct perf_event *bp);
126int hw_breakpoint_slots(int type);
127
128#else
129static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk) {}
130
131#endif /* CONFIG_HAVE_HW_BREAKPOINT */
132#endif /* __KERNEL__ */
133#endif /* _ARM_HW_BREAKPOINT_H */
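
A worked instance of the bitfield encoding above, for a 4-byte user-mode store watchpoint; the arithmetic follows encode_ctrl_reg() directly:

    struct arch_hw_breakpoint_ctrl ctrl = {
        .len       = ARM_BREAKPOINT_LEN_4,   /* 0xf */
        .type      = ARM_BREAKPOINT_STORE,   /* 2 */
        .privilege = ARM_BREAKPOINT_USER,    /* 2 */
        .enabled   = 1,
    };

    u32 reg = encode_ctrl_reg(ctrl);
    /* reg == (0xf << 5) | (2 << 3) | (2 << 1) | 1 == 0x1f5;
     * decode_ctrl_reg(reg, &ctrl) recovers the same fields. */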
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index 90831f6f5f5c..a71b417b1856 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -10,18 +10,12 @@ static inline void ack_bad_irq(int irq)
10 irq_err_count++; 10 irq_err_count++;
11} 11}
12 12
13/*
14 * Obsolete inline function for calling irq descriptor handlers.
15 */
16static inline void desc_handle_irq(unsigned int irq, struct irq_desc *desc)
17{
18 desc->handle_irq(irq, desc);
19}
20
21void set_irq_flags(unsigned int irq, unsigned int flags); 13void set_irq_flags(unsigned int irq, unsigned int flags);
22 14
23#define IRQF_VALID (1 << 0) 15#define IRQF_VALID (1 << 0)
24#define IRQF_PROBE (1 << 1) 16#define IRQF_PROBE (1 << 1)
25#define IRQF_NOAUTOEN (1 << 2) 17#define IRQF_NOAUTOEN (1 << 2)
26 18
19#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE)
20
27#endif 21#endif
diff --git a/arch/arm/include/asm/i8253.h b/arch/arm/include/asm/i8253.h
new file mode 100644
index 000000000000..70656b69d5ce
--- /dev/null
+++ b/arch/arm/include/asm/i8253.h
@@ -0,0 +1,15 @@
1#ifndef __ASMARM_I8253_H
2#define __ASMARM_I8253_H
3
4/* i8253A PIT registers */
5#define PIT_MODE 0x43
6#define PIT_CH0 0x40
7
8#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
9
10extern raw_spinlock_t i8253_lock;
11
12#define outb_pit outb_p
13#define inb_pit inb_p
14
15#endif
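
The PIT_LATCH expression rounds to the nearest divisor rather than truncating. A worked instance, assuming the conventional PIT_TICK_RATE of 1193182 Hz (defined elsewhere) and HZ = 100:

    /* PIT_LATCH = (1193182 + 100/2) / 100 = 1193232 / 100 = 11932,
     * whereas plain truncation of 1193182 / 100 would give 11931,
     * one count low. */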
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 1261b1f928d9..d66605dea55a 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -95,6 +95,15 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
95 return (void __iomem *)addr; 95 return (void __iomem *)addr;
96} 96}
97 97
98/* IO barriers */
99#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
100#define __iormb() rmb()
101#define __iowmb() wmb()
102#else
103#define __iormb() do { } while (0)
104#define __iowmb() do { } while (0)
105#endif
106
98/* 107/*
99 * Now, pick up the machine-defined IO definitions 108 * Now, pick up the machine-defined IO definitions
100 */ 109 */
@@ -125,17 +134,17 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
125 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space. 134 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
126 */ 135 */
127#ifdef __io 136#ifdef __io
128#define outb(v,p) __raw_writeb(v,__io(p)) 137#define outb(v,p) ({ __iowmb(); __raw_writeb(v,__io(p)); })
129#define outw(v,p) __raw_writew((__force __u16) \ 138#define outw(v,p) ({ __iowmb(); __raw_writew((__force __u16) \
130 cpu_to_le16(v),__io(p)) 139 cpu_to_le16(v),__io(p)); })
131#define outl(v,p) __raw_writel((__force __u32) \ 140#define outl(v,p) ({ __iowmb(); __raw_writel((__force __u32) \
132 cpu_to_le32(v),__io(p)) 141 cpu_to_le32(v),__io(p)); })
133 142
134#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __v; }) 143#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
135#define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \ 144#define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \
136 __raw_readw(__io(p))); __v; }) 145 __raw_readw(__io(p))); __iormb(); __v; })
137#define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \ 146#define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \
138 __raw_readl(__io(p))); __v; }) 147 __raw_readl(__io(p))); __iormb(); __v; })
139 148
140#define outsb(p,d,l) __raw_writesb(__io(p),d,l) 149#define outsb(p,d,l) __raw_writesb(__io(p),d,l)
141#define outsw(p,d,l) __raw_writesw(__io(p),d,l) 150#define outsw(p,d,l) __raw_writesw(__io(p),d,l)
@@ -192,14 +201,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
192#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \ 201#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
193 cpu_to_le32(v),__mem_pci(c))) 202 cpu_to_le32(v),__mem_pci(c)))
194 203
195#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
196#define __iormb() rmb()
197#define __iowmb() wmb()
198#else
199#define __iormb() do { } while (0)
200#define __iowmb() do { } while (0)
201#endif
202
203#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) 204#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
204#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) 205#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
205#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) 206#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
@@ -241,18 +242,15 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
241 * 242 *
242 */ 243 */
243#ifndef __arch_ioremap 244#ifndef __arch_ioremap
244#define ioremap(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) 245#define __arch_ioremap __arm_ioremap
245#define ioremap_nocache(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) 246#define __arch_iounmap __iounmap
246#define ioremap_cached(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_CACHED) 247#endif
247#define ioremap_wc(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_WC) 248
248#define iounmap(cookie) __iounmap(cookie)
249#else
250#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) 249#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
251#define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) 250#define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
252#define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED) 251#define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
253#define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC) 252#define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC)
254#define iounmap(cookie) __arch_iounmap(cookie) 253#define iounmap __arch_iounmap
255#endif
256 254
257/* 255/*
258 * io{read,write}{8,16,32} macros 256 * io{read,write}{8,16,32} macros
@@ -294,6 +292,7 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
294#define ARCH_HAS_VALID_PHYS_ADDR_RANGE 292#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
295extern int valid_phys_addr_range(unsigned long addr, size_t size); 293extern int valid_phys_addr_range(unsigned long addr, size_t size);
296extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); 294extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
295extern int devmem_is_allowed(unsigned long pfn);
297#endif 296#endif
298 297
299/* 298/*
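
Hoisting __iormb()/__iowmb() ahead of the machine-defined IO definitions lets the PCI/ISA accessors above enforce ordering too: inb/inw/inl now follow the read with a barrier, and outb/outw/outl issue one before the write. The effect, sketched with a hypothetical device (dev, STATUS, DONE, and process() are illustrative):

    u32 status = readl(dev->regs + STATUS);  /* raw read, then __iormb() */
    if (status & DONE)
        process(dev->dma_buf);  /* the barrier keeps this data access from
                                   being reordered before the status read */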
diff --git a/arch/arm/include/asm/ioctls.h b/arch/arm/include/asm/ioctls.h
index 0b30894b5482..9c9629816128 100644
--- a/arch/arm/include/asm/ioctls.h
+++ b/arch/arm/include/asm/ioctls.h
@@ -1,89 +1,8 @@
1#ifndef __ASM_ARM_IOCTLS_H 1#ifndef __ASM_ARM_IOCTLS_H
2#define __ASM_ARM_IOCTLS_H 2#define __ASM_ARM_IOCTLS_H
3 3
4#include <asm/ioctl.h>
5
6/* 0x54 is just a magic number to make these relatively unique ('T') */
7
8#define TCGETS 0x5401
9#define TCSETS 0x5402
10#define TCSETSW 0x5403
11#define TCSETSF 0x5404
12#define TCGETA 0x5405
13#define TCSETA 0x5406
14#define TCSETAW 0x5407
15#define TCSETAF 0x5408
16#define TCSBRK 0x5409
17#define TCXONC 0x540A
18#define TCFLSH 0x540B
19#define TIOCEXCL 0x540C
20#define TIOCNXCL 0x540D
21#define TIOCSCTTY 0x540E
22#define TIOCGPGRP 0x540F
23#define TIOCSPGRP 0x5410
24#define TIOCOUTQ 0x5411
25#define TIOCSTI 0x5412
26#define TIOCGWINSZ 0x5413
27#define TIOCSWINSZ 0x5414
28#define TIOCMGET 0x5415
29#define TIOCMBIS 0x5416
30#define TIOCMBIC 0x5417
31#define TIOCMSET 0x5418
32#define TIOCGSOFTCAR 0x5419
33#define TIOCSSOFTCAR 0x541A
34#define FIONREAD 0x541B
35#define TIOCINQ FIONREAD
36#define TIOCLINUX 0x541C
37#define TIOCCONS 0x541D
38#define TIOCGSERIAL 0x541E
39#define TIOCSSERIAL 0x541F
40#define TIOCPKT 0x5420
41#define FIONBIO 0x5421
42#define TIOCNOTTY 0x5422
43#define TIOCSETD 0x5423
44#define TIOCGETD 0x5424
45#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
46#define TIOCSBRK 0x5427 /* BSD compatibility */
47#define TIOCCBRK 0x5428 /* BSD compatibility */
48#define TIOCGSID 0x5429 /* Return the session ID of FD */
49#define TCGETS2 _IOR('T',0x2A, struct termios2)
50#define TCSETS2 _IOW('T',0x2B, struct termios2)
51#define TCSETSW2 _IOW('T',0x2C, struct termios2)
52#define TCSETSF2 _IOW('T',0x2D, struct termios2)
53#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
54#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
55#define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */
56
57#define TIOCGRS485 0x542E
58#define TIOCSRS485 0x542F
59
60#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
61#define FIOCLEX 0x5451
62#define FIOASYNC 0x5452
63#define TIOCSERCONFIG 0x5453
64#define TIOCSERGWILD 0x5454
65#define TIOCSERSWILD 0x5455
66#define TIOCGLCKTRMIOS 0x5456
67#define TIOCSLCKTRMIOS 0x5457
68#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
69#define TIOCSERGETLSR 0x5459 /* Get line status register */
70#define TIOCSERGETMULTI 0x545A /* Get multiport config */
71#define TIOCSERSETMULTI 0x545B /* Set multiport config */
72
73#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
74#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
75#define FIOQSIZE 0x545E 4#define FIOQSIZE 0x545E
76 5
77/* Used for packet mode */ 6#include <asm-generic/ioctls.h>
78#define TIOCPKT_DATA 0
79#define TIOCPKT_FLUSHREAD 1
80#define TIOCPKT_FLUSHWRITE 2
81#define TIOCPKT_STOP 4
82#define TIOCPKT_START 8
83#define TIOCPKT_NOSTOP 16
84#define TIOCPKT_DOSTOP 32
85#define TIOCPKT_IOCTL 64
86
87#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
88 7
89#endif 8#endif
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 6d09974e6646..1e6cca55c750 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -10,66 +10,85 @@
 */
 #if __LINUX_ARM_ARCH__ >= 6
 
-#define raw_local_irq_save(x)					\
-	({							\
-	__asm__ __volatile__(					\
-	"mrs %0, cpsr @ local_irq_save\n"			\
-	"cpsid i"						\
-	: "=r" (x) : : "memory", "cc");				\
-	})
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	asm volatile(
+		" mrs %0, cpsr @ arch_local_irq_save\n"
+		" cpsid i"
+		: "=r" (flags) : : "memory", "cc");
+	return flags;
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	asm volatile(
+		" cpsie i @ arch_local_irq_enable"
+		:
+		:
+		: "memory", "cc");
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	asm volatile(
+		" cpsid i @ arch_local_irq_disable"
+		:
+		:
+		: "memory", "cc");
+}
 
-#define raw_local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc")
-#define raw_local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
 #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
 #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
-
 #else
 
 /*
  * Save the current interrupt enable state & disable IRQs
  */
-#define raw_local_irq_save(x)					\
-	({							\
-	unsigned long temp;					\
-	(void) (&temp == &x);					\
-	__asm__ __volatile__(					\
-	"mrs %0, cpsr @ local_irq_save\n"			\
-"	orr %1, %0, #128\n"					\
-"	msr cpsr_c, %1"						\
-	: "=r" (x), "=r" (temp)					\
-	:							\
-	: "memory", "cc");					\
-	})
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags, temp;
+
+	asm volatile(
+		" mrs %0, cpsr @ arch_local_irq_save\n"
+		" orr %1, %0, #128\n"
+		" msr cpsr_c, %1"
+		: "=r" (flags), "=r" (temp)
+		:
+		: "memory", "cc");
+	return flags;
+}
 
 /*
  * Enable IRQs
  */
-#define raw_local_irq_enable()					\
-	({							\
-	unsigned long temp;					\
-	__asm__ __volatile__(					\
-	"mrs %0, cpsr @ local_irq_enable\n"			\
-"	bic %0, %0, #128\n"					\
-"	msr cpsr_c, %0"						\
-	: "=r" (temp)						\
-	:							\
-	: "memory", "cc");					\
-	})
+static inline void arch_local_irq_enable(void)
+{
+	unsigned long temp;
+	asm volatile(
+		" mrs %0, cpsr @ arch_local_irq_enable\n"
+		" bic %0, %0, #128\n"
+		" msr cpsr_c, %0"
+		: "=r" (temp)
+		:
+		: "memory", "cc");
+}
 
 /*
  * Disable IRQs
  */
-#define raw_local_irq_disable()					\
-	({							\
-	unsigned long temp;					\
-	__asm__ __volatile__(					\
-	"mrs %0, cpsr @ local_irq_disable\n"			\
-"	orr %0, %0, #128\n"					\
-"	msr cpsr_c, %0"						\
-	: "=r" (temp)						\
-	:							\
-	: "memory", "cc");					\
-	})
+static inline void arch_local_irq_disable(void)
+{
+	unsigned long temp;
+	asm volatile(
+		" mrs %0, cpsr @ arch_local_irq_disable\n"
+		" orr %0, %0, #128\n"
+		" msr cpsr_c, %0"
+		: "=r" (temp)
+		:
+		: "memory", "cc");
+}
 
 /*
  * Enable FIQs
@@ -106,27 +125,31 @@
 /*
  * Save the current interrupt enable state.
  */
-#define raw_local_save_flags(x)					\
-	({							\
-	__asm__ __volatile__(					\
-	"mrs %0, cpsr @ local_save_flags"			\
-	: "=r" (x) : : "memory", "cc");				\
-	})
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+	asm volatile(
+		" mrs %0, cpsr @ local_save_flags"
+		: "=r" (flags) : : "memory", "cc");
+	return flags;
+}
 
 /*
  * restore saved IRQ & FIQ state
  */
-#define raw_local_irq_restore(x)				\
-	__asm__ __volatile__(					\
-	"msr cpsr_c, %0 @ local_irq_restore\n"			\
-	:							\
-	: "r" (x)						\
-	: "memory", "cc")
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	asm volatile(
+		" msr cpsr_c, %0 @ local_irq_restore"
+		:
+		: "r" (flags)
+		: "memory", "cc");
+}
 
-#define raw_irqs_disabled_flags(flags)	\
-({					\
-	(int)((flags) & PSR_I_BIT);	\
-})
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return flags & PSR_I_BIT;
+}
 
 #endif
 #endif
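
A minimal sketch (not part of the patch) of how the generic irqflags layer is
expected to consume the new inlines; the caller below is hypothetical:

	static void example_critical_section(void)
	{
		unsigned long flags;

		flags = arch_local_irq_save();	/* mask IRQs, keep old CPSR */
		/* ... per-CPU work that must not race with interrupts ... */
		arch_local_irq_restore(flags);	/* restore the I bit exactly */
	}
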
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index 8ec9ef5c3c7b..c2b9b4bdec00 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -33,13 +33,26 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 	if (oldregs) {
 		memcpy(newregs, oldregs, sizeof(*newregs));
 	} else {
-		__asm__ __volatile__ ("stmia %0, {r0 - r15}"
-				      : : "r" (&newregs->ARM_r0));
-		__asm__ __volatile__ ("mrs %0, cpsr"
-				      : "=r" (newregs->ARM_cpsr));
+		__asm__ __volatile__ (
+			"stmia %[regs_base], {r0-r12}\n\t"
+			"mov %[_ARM_sp], sp\n\t"
+			"str lr, %[_ARM_lr]\n\t"
+			"adr %[_ARM_pc], 1f\n\t"
+			"mrs %[_ARM_cpsr], cpsr\n\t"
+			"1:"
+			: [_ARM_pc] "=r" (newregs->ARM_pc),
+			  [_ARM_cpsr] "=r" (newregs->ARM_cpsr),
+			  [_ARM_sp] "=r" (newregs->ARM_sp),
+			  [_ARM_lr] "=o" (newregs->ARM_lr)
+			: [regs_base] "r" (&newregs->ARM_r0)
+			: "memory"
+		);
 	}
 }
 
+/* Function pointer to optional machine-specific reinitialization */
+extern void (*kexec_reinit)(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* CONFIG_KEXEC */
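
Illustrative aside (hypothetical helper, not from this patch): the "adr ... 1f"
trick above is the standard way to capture a meaningful pc from inline asm:

	static inline unsigned long current_pc(void)
	{
		unsigned long pc;

		__asm__("adr %0, 1f\n1:" : "=r" (pc));	/* pc = address of label 1 */
		return pc;
	}
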
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 08265993227f..48066ce9ea34 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -70,7 +70,8 @@ extern int kgdb_fault_expected;
 #define _GP_REGS		16
 #define _FP_REGS		8
 #define _EXTRA_REGS		2
-#define DBG_MAX_REG_NUM	(_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define GDB_MAX_REGS		(_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define DBG_MAX_REG_NUM	(_GP_REGS + _FP_REGS + _EXTRA_REGS)
 
 #define KGDB_MAX_NO_CPUS	1
 #define BUFMAX			400
@@ -93,7 +94,7 @@ extern int kgdb_fault_expected;
 #define _SPT			13
 #define _LR			14
 #define _PC			15
-#define _CPSR			(DBG_MAX_REG_NUM - 1)
+#define _CPSR			(GDB_MAX_REGS - 1)
 
 /*
  * So that we can denote the end of a frame for tracing,
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index bb8a19bd5822..e46bdd0097eb 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -39,10 +39,13 @@ typedef u32 kprobe_opcode_t;
 struct kprobe;
 typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
 
+typedef unsigned long (kprobe_check_cc)(unsigned long);
+
 /* Architecture specific copy of original instruction. */
 struct arch_specific_insn {
 	kprobe_opcode_t		*insn;
 	kprobe_insn_handler_t	*insn_handler;
+	kprobe_check_cc		*insn_check_cc;
 };
 
 struct prev_kprobe {
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 50c7e7cfd670..080d74f8128d 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -30,7 +30,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
 #include "smp_twd.h"
 
 #define local_timer_ack()	twd_timer_ack()
-#define local_timer_stop()	twd_timer_stop()
 
 #else
 
@@ -40,24 +39,19 @@ asmlinkage void do_local_timer(struct pt_regs *);
  */
 int local_timer_ack(void);
 
-/*
- * Stop a local timer interrupt.
- */
-void local_timer_stop(void);
-
 #endif
 
 /*
  * Setup a local timer interrupt for a CPU.
  */
-void local_timer_setup(struct clock_event_device *);
+int local_timer_setup(struct clock_event_device *);
 
 #else
 
-static inline void local_timer_stop(void)
+static inline int local_timer_setup(struct clock_event_device *evt)
 {
+	return -ENXIO;
 }
-
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 8a0dd18ba642..946f4d778f71 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -15,18 +15,13 @@ struct meminfo;
 struct sys_timer;
 
 struct machine_desc {
-	/*
-	 * Note! The first four elements are used
-	 * by assembler code in head.S, head-common.S
-	 */
 	unsigned int		nr;		/* architecture number	*/
-	unsigned int		nr_irqs;	/* number of IRQs */
-	unsigned int		phys_io;	/* start of physical io	*/
-	unsigned int		io_pg_offst;	/* byte offset for io
-						 * page tabe entry	*/
-
 	const char		*name;		/* architecture name	*/
 	unsigned long		boot_params;	/* tagged list		*/
+	const char		**dt_compat;	/* array of device tree
+						 * 'compatible' strings	*/
+
+	unsigned int		nr_irqs;	/* number of IRQs */
 
 	unsigned int		video_start;	/* start of video RAM	*/
 	unsigned int		video_end;	/* end of video RAM	*/
@@ -40,12 +35,28 @@
 					  struct meminfo *);
 	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
+	void			(*init_early)(void);
 	void			(*init_irq)(void);
 	struct sys_timer	*timer;		/* system tick timer	*/
 	void			(*init_machine)(void);
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	void			(*handle_irq)(struct pt_regs *);
+#endif
 };
 
 /*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p)			\
+	for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+/*
  * Set of macros to define architecture features. This is built into
  * a table by the linker.
  */
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index ce3eee9fe26c..febe495d0c6e 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -17,10 +17,12 @@ struct seq_file;
 /*
  * This is internal. Do not use it.
  */
-extern unsigned int arch_nr_irqs;
-extern void (*init_arch_irq)(void);
 extern void init_FIQ(void);
-extern int show_fiq_list(struct seq_file *, void *);
+extern int show_fiq_list(struct seq_file *, int);
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+extern void (*handle_arch_irq)(struct pt_regs *);
+#endif
 
 /*
  * This is for easy migration, but should be changed in the source
@@ -32,4 +34,35 @@ do { \
 	raw_spin_unlock(&desc->lock);				\
 } while(0)
 
+#ifndef __ASSEMBLY__
+/*
+ * Entry/exit functions for chained handlers where the primary IRQ chip
+ * may implement either fasteoi or level-trigger flow control.
+ */
+static inline void chained_irq_enter(struct irq_chip *chip,
+				     struct irq_desc *desc)
+{
+	/* FastEOI controllers require no action on entry. */
+	if (chip->irq_eoi)
+		return;
+
+	if (chip->irq_mask_ack) {
+		chip->irq_mask_ack(&desc->irq_data);
+	} else {
+		chip->irq_mask(&desc->irq_data);
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+	}
+}
+
+static inline void chained_irq_exit(struct irq_chip *chip,
+				    struct irq_desc *desc)
+{
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+	else
+		chip->irq_unmask(&desc->irq_data);
+}
+#endif
+
 #endif
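
Sketch of the intended pairing (hypothetical demux handler; the gpio_* names
and GPIO_IRQ_BASE are placeholders, not part of this patch):

	static void gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		unsigned long pending;

		chained_irq_enter(chip, desc);	/* mask+ack, or defer to eoi */
		pending = gpio_read_pending();	/* placeholder register read */
		while (pending) {
			int bit = __ffs(pending);

			generic_handle_irq(GPIO_IRQ_BASE + bit);
			pending &= ~BIT(bit);
		}
		chained_irq_exit(chip, desc);	/* unmask or eoi the parent */
	}
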
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 35d408f6dccf..d5adaae5ee2c 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -34,7 +34,6 @@
  * timer interrupt which may be pending.
  */
 struct sys_timer {
-	struct sys_device	dev;
 	void			(*init)(void);
 	void			(*suspend)(void);
 	void			(*resume)(void);
@@ -43,7 +42,6 @@ struct sys_timer {
 #endif
 };
 
-extern struct sys_timer *system_timer;
 extern void timer_tick(void);
 
 #endif
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
index 833306ee9e7f..ea297ac70bc6 100644
--- a/arch/arm/include/asm/mach/udc_pxa2xx.h
+++ b/arch/arm/include/asm/mach/udc_pxa2xx.h
@@ -20,8 +20,6 @@ struct pxa2xx_udc_mach_info {
 	 * VBUS IRQ and omit the methods above. Store the GPIO number
 	 * here. Note that sometimes the signals go through inverters...
 	 */
-	bool	gpio_vbus_inverted;
-	int	gpio_vbus;			/* high == vbus present */
 	bool	gpio_pullup_inverted;
 	int	gpio_pullup;			/* high == pullup activated */
 };
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index fdbc43b2e6c0..b8da2e415e4e 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -1,13 +1,6 @@
 #ifndef _ASM_ARM_MEMBLOCK_H
 #define _ASM_ARM_MEMBLOCK_H
 
-#ifdef CONFIG_MMU
-extern phys_addr_t lowmem_end_addr;
-#define MEMBLOCK_REAL_LIMIT	lowmem_end_addr
-#else
-#define MEMBLOCK_REAL_LIMIT	0
-#endif
-
 struct meminfo;
 struct machine_desc;
 
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 23c2e8e5c0fa..af44a8fb3480 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -15,6 +15,7 @@
 
 #include <linux/compiler.h>
 #include <linux/const.h>
+#include <linux/types.h>
 #include <mach/memory.h>
 #include <asm/sizes.h>
 
@@ -133,20 +134,10 @@
 #endif
 
 /*
- * Physical vs virtual RAM address space conversion. These are
- * private definitions which should NOT be used outside memory.h
- * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
- */
-#ifndef __virt_to_phys
-#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
-
-/*
  * Convert a physical address to a Page Frame Number and back
  */
-#define	__phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
-#define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
+#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
+#define	__pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
 
 /*
  * Convert a page to/from a physical address
@@ -157,19 +148,71 @@
 #ifndef __ASSEMBLY__
 
 /*
+ * Physical vs virtual RAM address space conversion. These are
+ * private definitions which should NOT be used outside memory.h
+ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+#ifndef __virt_to_phys
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+/*
+ * Constants used to force the right instruction encodings and shifts
+ * so that all we need to do is modify the 8-bit constant field.
+ */
+#define __PV_BITS_31_24	0x81000000
+#define __PV_BITS_23_16	0x00810000
+
+extern unsigned long __pv_phys_offset;
+#define PHYS_OFFSET __pv_phys_offset
+
+#define __pv_stub(from,to,instr,type)			\
+	__asm__("@ __pv_stub\n"				\
+	"1:	" instr "	%0, %1, %2\n"		\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "=r" (to)					\
+	: "r" (from), "I" (type))
+
+static inline unsigned long __virt_to_phys(unsigned long x)
+{
+	unsigned long t;
+	__pv_stub(x, t, "add", __PV_BITS_31_24);
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	__pv_stub(t, t, "add", __PV_BITS_23_16);
+#endif
+	return t;
+}
+
+static inline unsigned long __phys_to_virt(unsigned long x)
+{
+	unsigned long t;
+	__pv_stub(x, t, "sub", __PV_BITS_31_24);
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	__pv_stub(t, t, "sub", __PV_BITS_23_16);
+#endif
+	return t;
+}
+#else
+#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#endif
+
+/*
  * The DMA mask corresponding to the maximum bus address allocatable
  * using GFP_DMA. The default here places no restriction on DMA
  * allocations. This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-#ifndef ISA_DMA_THRESHOLD
+#ifndef ARM_DMA_ZONE_SIZE
 #define ISA_DMA_THRESHOLD	(0xffffffffULL)
-#endif
-
-#ifndef arch_adjust_zones
-#define arch_adjust_zones(size,holes) do { } while (0)
-#elif !defined(CONFIG_ZONE_DMA)
-#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
+#else
+#define ISA_DMA_THRESHOLD	(PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
 #endif
 
@@ -188,12 +231,12 @@
  * translation for translating DMA addresses. Use the driver
  * DMA support - see dma-mapping.h.
  */
-static inline unsigned long virt_to_phys(void *x)
+static inline phys_addr_t virt_to_phys(const volatile void *x)
 {
 	return __virt_to_phys((unsigned long)(x));
 }
 
-static inline void *phys_to_virt(unsigned long x)
+static inline void *phys_to_virt(phys_addr_t x)
 {
 	return (void *)(__phys_to_virt((unsigned long)(x)));
 }
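
Conceptual sketch of the boot-time patching that __pv_stub enables (the real
fixup lives in arch/arm/kernel/head.S; the loop below is illustrative only,
and the section symbols are assumptions):

	extern unsigned long __pv_table_begin[], __pv_table_end[];

	static void __init fixup_pv_table_sketch(unsigned long delta)
	{
		unsigned long *p;

		for (p = __pv_table_begin; p < __pv_table_end; p++) {
			u32 *insn = (u32 *)*p;	/* add/sub recorded by __pv_stub */

			/* rewrite the 8-bit immediate field with delta >> 24 */
			*insn = (*insn & ~0xff) | ((delta >> 24) & 0xff);
		}
		/* I-cache/D-cache maintenance omitted for brevity */
	}
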
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 68870c776671..b4ffe9d5b526 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -13,6 +13,10 @@ typedef struct {
 
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID(mm)	((mm)->context.id & 255)
+
+/* init_mm.context.id_lock should be initialized. */
+#define INIT_MM_CONTEXT(name)						      \
+	.context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
 #else
 #define ASID(mm)	(0)
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c..71605d9f8e42 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,7 +18,6 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
-#include <asm-generic/mm_hooks.h>
 
 void __check_kvm_seq(struct mm_struct *mm);
 
@@ -134,4 +133,32 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #define deactivate_mm(tsk,mm)	do { } while (0)
 #define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
+/*
+ * We are inserting a "fake" vma for the user-accessible vector page so
+ * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
+ * But we also want to remove it before the generic code gets to see it
+ * during process exit or the unmapping of it would cause total havoc.
+ * (the macro is used as remove_vma() is static to mm/mmap.c)
+ */
+#define arch_exit_mmap(mm) \
+do { \
+	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
+	if (high_vma) { \
+		BUG_ON(high_vma->vm_next); /* it should be last */ \
+		if (high_vma->vm_prev) \
+			high_vma->vm_prev->vm_next = NULL; \
+		else \
+			mm->mmap = NULL; \
+		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
+		mm->mmap_cache = NULL; \
+		mm->map_count--; \
+		remove_vma(high_vma); \
+	} \
+} while (0)
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+}
+
 #endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index e4dfa69abb68..543b44916d2c 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -7,24 +7,49 @@
 
 struct unwind_table;
 
-struct mod_arch_specific
-{
 #ifdef CONFIG_ARM_UNWIND
-	Elf_Shdr *unw_sec_init;
-	Elf_Shdr *unw_sec_devinit;
-	Elf_Shdr *unw_sec_core;
-	Elf_Shdr *sec_init_text;
-	Elf_Shdr *sec_devinit_text;
-	Elf_Shdr *sec_core_text;
-	struct unwind_table *unwind_init;
-	struct unwind_table *unwind_devinit;
-	struct unwind_table *unwind_core;
+enum {
+	ARM_SEC_INIT,
+	ARM_SEC_DEVINIT,
+	ARM_SEC_CORE,
+	ARM_SEC_EXIT,
+	ARM_SEC_DEVEXIT,
+	ARM_SEC_MAX,
+};
+#endif
+
+struct mod_arch_specific {
+#ifdef CONFIG_ARM_UNWIND
+	struct unwind_table *unwind[ARM_SEC_MAX];
 #endif
 };
 
 /*
- * Include the ARM architecture version.
+ * Add the ARM architecture version to the version magic string
  */
-#define MODULE_ARCH_VERMAGIC	"ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+/* Add __virt_to_phys patching state as well */
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
+#else
+#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#endif
+#else
+#define MODULE_ARCH_VERMAGIC_P2V ""
+#endif
+
+/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
+#ifdef CONFIG_THUMB2_KERNEL
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#else
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+	MODULE_ARCH_VERMAGIC_ARMVSN \
+	MODULE_ARCH_VERMAGIC_ARMTHUMB \
+	MODULE_ARCH_VERMAGIC_P2V
 
 #endif /* _ASM_ARM_MODULE_H */
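
Worked example (assumed configuration, not from the patch): with
__LINUX_ARM_ARCH__ == 7, CONFIG_THUMB2_KERNEL=y and 8-bit
CONFIG_ARM_PATCH_PHYS_VIRT=y, the three fragments concatenate to:

	MODULE_ARCH_VERMAGIC == "ARMv7 " "thumb2 " "p2v8 "	/* "ARMv7 thumb2 p2v8 " */

so a module built without matching Thumb/p2v settings is refused at load time.
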
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 25f76bae57ab..d8387437ec5a 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -21,43 +21,70 @@
 #ifndef __ASM_OUTERCACHE_H
 #define __ASM_OUTERCACHE_H
 
+#include <linux/types.h>
+
 struct outer_cache_fns {
 	void (*inv_range)(unsigned long, unsigned long);
 	void (*clean_range)(unsigned long, unsigned long);
 	void (*flush_range)(unsigned long, unsigned long);
+	void (*flush_all)(void);
+	void (*inv_all)(void);
+	void (*disable)(void);
 #ifdef CONFIG_OUTER_CACHE_SYNC
 	void (*sync)(void);
 #endif
+	void (*set_debug)(unsigned long);
 };
 
 #ifdef CONFIG_OUTER_CACHE
 
 extern struct outer_cache_fns outer_cache;
 
-static inline void outer_inv_range(unsigned long start, unsigned long end)
+static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
 {
 	if (outer_cache.inv_range)
 		outer_cache.inv_range(start, end);
 }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
+static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
 {
 	if (outer_cache.clean_range)
 		outer_cache.clean_range(start, end);
 }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
+static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 {
 	if (outer_cache.flush_range)
 		outer_cache.flush_range(start, end);
 }
 
+static inline void outer_flush_all(void)
+{
+	if (outer_cache.flush_all)
+		outer_cache.flush_all();
+}
+
+static inline void outer_inv_all(void)
+{
+	if (outer_cache.inv_all)
+		outer_cache.inv_all();
+}
+
+static inline void outer_disable(void)
+{
+	if (outer_cache.disable)
+		outer_cache.disable();
+}
+
 #else
 
-static inline void outer_inv_range(unsigned long start, unsigned long end)
+static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
 { }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
+static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
 { }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
+static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 { }
+static inline void outer_flush_all(void) { }
+static inline void outer_inv_all(void) { }
+static inline void outer_disable(void) { }
 
 #endif
 
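
Usage sketch (hypothetical callers, not part of the patch): the helpers stay
unconditional at call sites because each one no-ops when its hook is NULL.

	static void example_dma_to_device(void *buf, unsigned long len)
	{
		phys_addr_t start = virt_to_phys(buf);

		/* after cleaning L1 (not shown), push L2 out to memory */
		outer_clean_range(start, start + len);
	}

	static void example_shutdown(void)
	{
		outer_flush_all();	/* write back + invalidate all of L2 */
		outer_disable();	/* leave a clean cache for the next kernel */
	}
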
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index a485ac3c8696..ac75d0848889 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,13 +151,15 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+typedef unsigned long pteval_t;
+
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
 /*
  * These are used to make use of C type-checking..
  */
-typedef struct { unsigned long pte; } pte_t;
+typedef struct { pteval_t pte; } pte_t;
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd[2]; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
@@ -175,7 +177,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 /*
  * .. while these make it easier on the compiler
  */
-typedef unsigned long pte_t;
+typedef pteval_t pte_t;
 typedef unsigned long pmd_t;
 typedef unsigned long pgd_t[2];
 typedef unsigned long pgprot_t;
@@ -195,7 +197,7 @@ typedef unsigned long pgprot_t;
 
 typedef struct page *pgtable_t;
 
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 extern int pfn_valid(unsigned long);
 #endif
 
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index b5799a3b7117..c4aa4e8c6af9 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,18 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/*
- * NOP: on *most* (read: all supported) ARM platforms, the performance
- * counter interrupts are regular interrupts and not an NMI. This
- * means that when we receive the interrupt we can call
- * perf_event_do_pending() that handles all of the work with
- * interrupts disabled.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
 /* ARM performance counters start from 1 (in the cp15 accesses) so use the
  * same indexes here for consistency. */
 #define PERF_EVENT_INDEX_OFFSET 1
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index b12cc98bbe04..22de005f159c 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -10,6 +10,8 @@
 #ifndef _ASMARM_PGALLOC_H
 #define _ASMARM_PGALLOC_H
 
+#include <linux/pagemap.h>
+
 #include <asm/domain.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/processor.h>
@@ -30,14 +32,16 @@
 #define pmd_free(mm, pmd)		do { } while (0)
 #define pgd_populate(mm,pmd,pte)	BUG()
 
-extern pgd_t *get_pgd_slow(struct mm_struct *mm);
-extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
-
-#define pgd_alloc(mm)			get_pgd_slow(mm)
-#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
 #define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
 
+static inline void clean_pte_table(pte_t *pte)
+{
+	clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
+}
+
 /*
  * Allocate one PTE table.
  *
@@ -45,14 +49,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
  * into one table thus:
  *
  *  +------------+
- *  |  h/w pt 0  |
- *  +------------+
- *  |  h/w pt 1  |
- *  +------------+
  *  | Linux pt 0 |
  *  +------------+
  *  | Linux pt 1 |
  *  +------------+
+ *  |  h/w pt 0  |
+ *  +------------+
+ *  |  h/w pt 1  |
+ *  +------------+
  */
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
@@ -60,10 +64,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 	pte_t *pte;
 
 	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
-	if (pte) {
-		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
-		pte += PTRS_PER_PTE;
-	}
+	if (pte)
+		clean_pte_table(pte);
 
 	return pte;
 }
@@ -79,10 +81,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 	pte = alloc_pages(PGALLOC_GFP, 0);
 #endif
 	if (pte) {
-		if (!PageHighMem(pte)) {
-			void *page = page_address(pte);
-			clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
-		}
+		if (!PageHighMem(pte))
+			clean_pte_table(page_address(pte));
 		pgtable_page_ctor(pte);
 	}
 
@@ -94,10 +94,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
  */
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	if (pte) {
-		pte -= PTRS_PER_PTE;
+	if (pte)
 		free_page((unsigned long)pte);
-	}
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -106,8 +104,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 	__free_page(pte);
 }
 
-static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
+static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+				  unsigned long prot)
 {
+	unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot;
 	pmdp[0] = __pmd(pmdval);
 	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
 	flush_pmd_entry(pmdp);
@@ -122,20 +122,16 @@ static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-	unsigned long pte_ptr = (unsigned long)ptep;
-
 	/*
-	 * The pmd must be loaded with the physical
-	 * address of the PTE table
+	 * The pmd must be loaded with the physical address of the PTE table
 	 */
-	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
-	__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
+	__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
 }
 
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 {
-	__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
+	__pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
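
Layout arithmetic implied by the reordering above (assuming 4K pages and
PTRS_PER_PTE == 512, so PTE_HWTABLE_OFF == 2048):

	page + 0    : 512 Linux pte_t entries (what a pte_t * now points at)
	page + 2048 : two 256-entry hardware tables (what the PMD must point at)

	/* illustrative restatement of what __pmd_populate() computes */
	unsigned long example_pmdval(pte_t *ptep, unsigned long prot)
	{
		return (__pa(ptep) + PTE_HWTABLE_OFF) | prot;
	}
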
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index e90b167ea848..5750704e0271 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -10,6 +10,7 @@
 #ifndef _ASMARM_PGTABLE_H
 #define _ASMARM_PGTABLE_H
 
+#include <linux/const.h>
 #include <asm-generic/4level-fixup.h>
 #include <asm/proc-fns.h>
 
@@ -54,7 +55,7 @@
  * Therefore, we tweak the implementation slightly - we tell Linux that we
  * have 2048 entries in the first level, each of which is 8 bytes (iow, two
  * hardware pointers to the second level.) The second level contains two
- * hardware PTE tables arranged contiguously, followed by Linux versions
+ * hardware PTE tables arranged contiguously, preceded by Linux versions
  * which contain the state information Linux needs. We, therefore, end up
  * with 512 entries in the "PTE" level.
 
@@ -62,15 +63,15 @@
  *
  *    pgd             pte
  * |        |
- * +--------+ +0
- * |        |-----> +------------+ +0
+ * +--------+
+ * |        |       +------------+ +0
+ * +- - - - +       | Linux pt 0 |
+ * |        |       +------------+ +1024
+ * +--------+ +0    | Linux pt 1 |
+ * |        |-----> +------------+ +2048
  * +- - - - + +4    |  h/w pt 0  |
- * |        |-----> +------------+ +1024
+ * |        |-----> +------------+ +3072
 * +--------+ +8    |  h/w pt 1  |
- * |        |       +------------+ +2048
- * +- - - - +       | Linux pt 0 |
- * |        |       +------------+ +3072
- * +--------+       | Linux pt 1 |
  * |        |       +------------+ +4096
  *
  * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
@@ -102,6 +103,10 @@
 #define PTRS_PER_PMD		1
 #define PTRS_PER_PGD		2048
 
+#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
+#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
+
 /*
  * PMD_SHIFT determines the size of the area a second-level page table can map
  * PGDIR_SHIFT determines what a third-level page table entry can map
@@ -112,13 +117,13 @@
 #define LIBRARY_TEXT_START	0x0c000000
 
 #ifndef __ASSEMBLY__
-extern void __pte_error(const char *file, int line, unsigned long val);
-extern void __pmd_error(const char *file, int line, unsigned long val);
-extern void __pgd_error(const char *file, int line, unsigned long val);
+extern void __pte_error(const char *file, int line, pte_t);
+extern void __pmd_error(const char *file, int line, pmd_t);
+extern void __pgd_error(const char *file, int line, pgd_t);
 
-#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
-#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
-#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
+#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
+#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
 #endif /* !__ASSEMBLY__ */
 
 #define PMD_SIZE		(1UL << PMD_SHIFT)
@@ -133,8 +138,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  */
 #define FIRST_USER_ADDRESS	PAGE_SIZE
 
-#define FIRST_USER_PGD_NR	1
-#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 
 /*
  * section address mask and size definitions.
@@ -161,30 +165,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
-#define L_PTE_PRESENT		(1 << 0)
-#define L_PTE_YOUNG		(1 << 1)
-#define L_PTE_FILE		(1 << 2)	/* only when !PRESENT */
-#define L_PTE_DIRTY		(1 << 6)
-#define L_PTE_WRITE		(1 << 7)
-#define L_PTE_USER		(1 << 8)
-#define L_PTE_EXEC		(1 << 9)
-#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */
+#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
+#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
+#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
+#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6)
+#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)
+#define L_PTE_USER		(_AT(pteval_t, 1) << 8)
+#define L_PTE_XN		(_AT(pteval_t, 1) << 9)
+#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
 
 /*
  * These are the memory types, defined to be compatible with
  * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
  */
-#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* 0000 */
-#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* 0001 */
-#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* 0010 */
-#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* 0011 */
-#define L_PTE_MT_MINICACHE	(0x06 << 2)	/* 0110 (sa1100, xscale) */
-#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* 0111 */
-#define L_PTE_MT_DEV_SHARED	(0x04 << 2)	/* 0100 */
-#define L_PTE_MT_DEV_NONSHARED	(0x0c << 2)	/* 1100 */
-#define L_PTE_MT_DEV_WC		(0x09 << 2)	/* 1001 */
-#define L_PTE_MT_DEV_CACHED	(0x0b << 2)	/* 1011 */
-#define L_PTE_MT_MASK		(0x0f << 2)
+#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
+#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
+#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */
+#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */
+#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 (sa1100, xscale) */
+#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */
+#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */
+#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
+#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
+#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
+#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
 
 #ifndef __ASSEMBLY__
 
@@ -201,23 +205,44 @@
 
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		pgprot_user
-#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
-#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
-#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
-#define PAGE_KERNEL		pgprot_kernel
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_kernel, L_PTE_EXEC)
+#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
+#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
+#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
+#define PAGE_KERNEL_EXEC	pgprot_kernel
 
-#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT)
-#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
-#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
-#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
-#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
-#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
-#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
+#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
+#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
+#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
+
+#define __pgprot_modify(prot,mask,bits)		\
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
+#define pgprot_noncached(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+
+#define pgprot_writecombine(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+#else
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
+#endif
 
 #endif /* __ASSEMBLY__ */
 
@@ -255,76 +280,32 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 extern struct page *empty_zero_page;
 #define ZERO_PAGE(vaddr)	(empty_zero_page)
 
-#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
-
-#define pte_none(pte)		(!pte_val(pte))
-#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
-#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
-#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
 
-#define pte_offset_map(dir,addr)	(__pte_map(dir, KM_PTE0) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr)	(__pte_map(dir, KM_PTE1) + __pte_index(addr))
-#define pte_unmap(pte)			__pte_unmap(pte, KM_PTE0)
-#define pte_unmap_nested(pte)		__pte_unmap(pte, KM_PTE1)
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifndef CONFIG_HIGHPTE
-#define __pte_map(dir,km)	pmd_page_vaddr(*(dir))
-#define __pte_unmap(pte,km)	do { } while (0)
-#else
-#define __pte_map(dir,km)	((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
-#define __pte_unmap(pte,km)	kunmap_atomic((pte - PTRS_PER_PTE), km)
-#endif
+/* to find an entry in a page-table-directory */
+#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
 
-#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
 
-#define set_pte_at(mm,addr,ptep,pteval) do { \
-	set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
- } while (0)
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
 /*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
  */
-#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
-#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
-#define pte_special(pte)	(0)
-
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
-
-PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
-PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
-PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
-PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
-PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
-
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+#define pgd_none(pgd)		(0)
+#define pgd_bad(pgd)		(0)
+#define pgd_present(pgd)	(1)
+#define pgd_clear(pgdp)		do { } while (0)
+#define set_pgd(pgd,pgdp)	do { } while (0)
+#define set_pud(pud,pudp)	do { } while (0)
 
-#define __pgprot_modify(prot,mask,bits)		\
-	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
 
-/*
- * Mark the prot value as uncacheable and unbufferable.
- */
-#define pgprot_noncached(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
-#define pgprot_writecombine(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
-#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
-#define pgprot_dmacoherent(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-				     unsigned long size, pgprot_t vma_prot);
-#else
-#define pgprot_dmacoherent(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
-#endif
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir, addr)	((pmd_t *)(dir))
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
@@ -346,56 +327,89 @@
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
-	unsigned long ptr;
-
-	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
-	ptr += PTRS_PER_PTE * sizeof(void *);
-
-	return __va(ptr);
+	return __va(pmd_val(pmd) & PAGE_MASK);
 }
 
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end)	(end)
 
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd)		(0)
-#define pgd_bad(pgd)		(0)
-#define pgd_present(pgd)	(1)
-#define pgd_clear(pgdp)		do { } while (0)
-#define set_pgd(pgd,pgdp)	do { } while (0)
 
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
+#ifndef CONFIG_HIGHPTE
+#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
+#define __pte_unmap(pte)	do { } while (0)
+#else
+#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
+#define __pte_unmap(pte)	kunmap_atomic(pte)
+#endif
 
-#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))
+#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
+#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))
 
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr)	((pmd_t *)(dir))
+#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
+#define pte_unmap(pte)			__pte_unmap(pte)
 
-/* Find an entry in the third-level page table.. */
-#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))
+
+#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
+#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
+
+#if __LINUX_ARM_ARCH__ < 6
+static inline void __sync_icache_dcache(pte_t pteval)
+{
+}
+#else
+extern void __sync_icache_dcache(pte_t pteval);
+#endif
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	if (addr >= TASK_SIZE)
+		set_pte_ext(ptep, pteval, 0);
+	else {
+		__sync_icache_dcache(pteval);
+		set_pte_ext(ptep, pteval, PTE_EXT_NG);
+	}
+}
+
+#define pte_none(pte)		(!pte_val(pte))
+#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
+#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
+#define pte_special(pte)	(0)
+
+#define pte_present_user(pte) \
+	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+	 (L_PTE_PRESENT | L_PTE_USER))
+
+#define PTE_BIT_FUNC(fn,op) \
+static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
+PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
+PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
+PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
+PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
+PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
+	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
 /*
  * Encode and decode a swap entry. Swap entries are stored in the Linux
  * page tables as follows:
@@ -460,6 +474,9 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #define pgtable_cache_init() do { } while (0)
 
+void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
+void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
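
Sanity sketch (hypothetical check, not from the patch): with the inverted
L_PTE_RDONLY/L_PTE_XN encoding, "writable" is now the absence of a bit:

	static int example_pte_invariants(pte_t pte)
	{
		/* wrprotect sets L_PTE_RDONLY, so pte_write() must turn false */
		return pte_write(pte_wrprotect(pte)) == 0 &&
		       pte_write(pte_mkwrite(pte)) == 1;
	}
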
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 8ccea012722c..7544ce6b481a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -12,11 +12,25 @@
 #ifndef __ARM_PMU_H__
 #define __ARM_PMU_H__
 
+#include <linux/interrupt.h>
+
 enum arm_pmu_type {
 	ARM_PMU_DEVICE_CPU	= 0,
 	ARM_NUM_PMU_DEVICES,
 };
 
+/*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+ * @handle_irq: an optional handler which will be called from the interrupt and
+ * passed the address of the low level handler, and can be used to implement
+ * any platform specific handling before or after calling it.
+ */
+struct arm_pmu_platdata {
+	irqreturn_t (*handle_irq)(int irq, void *dev,
+				  irq_handler_t pmu_handler);
+};
+
 #ifdef CONFIG_CPU_HAS_PMU
 
 /**
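
Hypothetical board-file sketch (the board_* names are placeholders) showing
how the new platdata hook wraps the PMU's low-level handler:

	extern void board_pmu_clear_wakeup(void);	/* placeholder */

	static irqreturn_t board_pmu_handler(int irq, void *dev,
					     irq_handler_t pmu_handler)
	{
		board_pmu_clear_wakeup();	/* platform glue before ... */
		return pmu_handler(irq, dev);	/* ... the real PMU handler */
	}

	static struct arm_pmu_platdata board_pmu_platdata = {
		.handle_irq = board_pmu_handler,
	};
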
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 8fdae9bc9abb..8ec535e11fd7 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -13,250 +13,86 @@
 
 #ifdef __KERNEL__
 
+#include <asm/glue-proc.h>
+#include <asm/page.h>
 
-/*
- * Work out if we need multiple CPU support
- */
-#undef MULTI_CPU
-#undef CPU_NAME
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
 
 /*
- * CPU_NAME - the prefix for CPU related functions
+ * Don't change this structure - ASM code relies on it.
  */
-
-#ifdef CONFIG_CPU_ARM610
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm6
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM7TDMI
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm7tdmi
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM710
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm7
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM720T
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm720
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM740T
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm740
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM9TDMI
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm9tdmi
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM920T
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm920
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM922T
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm922
-# endif
-#endif
-
-#ifdef CONFIG_CPU_FA526
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_fa526
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM925T
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm925
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM926T
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm926
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM940T
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm940
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM946E
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm946
-# endif
-#endif
-
-#ifdef CONFIG_CPU_SA110
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_sa110
-# endif
-#endif
-
-#ifdef CONFIG_CPU_SA1100
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_sa1100
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1020
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm1020
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1020E
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm1020e
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1022
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm1022
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1026
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm1026
-# endif
-#endif
-
-#ifdef CONFIG_CPU_XSCALE
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_xscale
-# endif
-#endif
-
-#ifdef CONFIG_CPU_XSC3
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_xsc3
-# endif
-#endif
-
-#ifdef CONFIG_CPU_MOHAWK
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_mohawk
-# endif
-#endif
-
-#ifdef CONFIG_CPU_FEROCEON
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_feroceon
-# endif
-#endif
-
-#ifdef CONFIG_CPU_V6
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_v6
-# endif
-#endif
-
-#ifdef CONFIG_CPU_V7
-# ifdef CPU_NAME
-#  undef MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_v7
-# endif
-#endif
-
-#ifndef __ASSEMBLY__
+extern struct processor {
+	/* MISC
+	 * get data abort address/flags
+	 */
+	void (*_data_abort)(unsigned long pc);
+	/*
+	 * Retrieve prefetch fault address
+	 */
+	unsigned long (*_prefetch_abort)(unsigned long lr);
+	/*
+	 * Set up any processor specifics
+	 */
+	void (*_proc_init)(void);
+	/*
+	 * Disable any processor specifics
+	 */
+	void (*_proc_fin)(void);
+	/*
+	 * Special stuff for a reset
+	 */
+	void (*reset)(unsigned long addr) __attribute__((noreturn));
+	/*
+	 * Idle the processor
+	 */
+	int (*_do_idle)(void);
+	/*
+	 * Processor architecture specific
+	 */
+	/*
+	 * clean a virtual address range from the
+	 * D-cache without flushing the cache.
+	 */
+	void (*dcache_clean_area)(void *addr, int size);
+
+	/*
+	 * Set the page table
+	 */
+	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+	/*
+	 * Set a possibly extended PTE. Non-extended PTEs should
+	 * ignore 'ext'.
+	 */
+	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+
+	/* Suspend/resume */
+	unsigned int suspend_size;
+	void (*do_suspend)(void *);
+	void (*do_resume)(void *);
+} processor;
 
 #ifndef MULTI_CPU
-#include <asm/cpu-single.h>
+extern void cpu_proc_init(void);
78extern void cpu_proc_fin(void);
79extern int cpu_do_idle(void);
80extern void cpu_dcache_clean_area(void *, int);
81extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
82extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
83extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
256#else 84#else
257#include <asm/cpu-multi32.h> 85#define cpu_proc_init() processor._proc_init()
86#define cpu_proc_fin() processor._proc_fin()
87#define cpu_reset(addr) processor.reset(addr)
88#define cpu_do_idle() processor._do_idle()
89#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
90#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
91#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
258#endif 92#endif
259 93
94extern void cpu_resume(void);
95
260#include <asm/memory.h> 96#include <asm/memory.h>
261 97
262#ifdef CONFIG_MMU 98#ifdef CONFIG_MMU
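The rewritten proc-fns.h keeps a single calling convention for both build styles: with one processor selected, cpu_do_idle() and friends bind directly to that processor's implementation at link time; with MULTI_CPU they indirect through the boot-time-filled struct processor. A minimal call-site sketch, assuming kernel context (treating a nonzero return as "idle not entered" is an assumption of the sketch):

/* Identical source for both configurations: the macro/extern layer
 * above decides whether this is a direct call or processor._do_idle(). */
static void idle_once(void)
{
        if (cpu_do_idle())
                cpu_relax();    /* idle refused; back off politely */
}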
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 7bed3daf83b8..b2d9df5667af 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -19,6 +19,7 @@
19 19
20#ifdef __KERNEL__ 20#ifdef __KERNEL__
21 21
22#include <asm/hw_breakpoint.h>
22#include <asm/ptrace.h> 23#include <asm/ptrace.h>
23#include <asm/types.h> 24#include <asm/types.h>
24 25
@@ -28,19 +29,10 @@
28#define STACK_TOP_MAX TASK_SIZE 29#define STACK_TOP_MAX TASK_SIZE
29#endif 30#endif
30 31
31union debug_insn {
32 u32 arm;
33 u16 thumb;
34};
35
36struct debug_entry {
37 u32 address;
38 union debug_insn insn;
39};
40
41struct debug_info { 32struct debug_info {
42 int nsaved; 33#ifdef CONFIG_HAVE_HW_BREAKPOINT
43 struct debug_entry bp[2]; 34 struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
35#endif
44}; 36};
45 37
46struct thread_struct { 38struct thread_struct {
@@ -91,7 +83,7 @@ extern void release_thread(struct task_struct *);
91 83
92unsigned long get_wchan(struct task_struct *p); 84unsigned long get_wchan(struct task_struct *p);
93 85
94#if __LINUX_ARM_ARCH__ == 6 86#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
95#define cpu_relax() smp_mb() 87#define cpu_relax() smp_mb()
96#else 88#else
97#define cpu_relax() barrier() 89#define cpu_relax() barrier()
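The cpu_relax() change widens the ARMv6 workaround to any kernel built with CONFIG_ARM_ERRATA_754327, where a polling loop with no barrier may never observe another core's store. A hedged polling sketch:

extern int ready;               /* set by another CPU */

/* With the erratum configured, cpu_relax() is a full smp_mb(),
 * guaranteeing the flag is genuinely re-read so the loop can end. */
while (!ACCESS_ONCE(ready))
        cpu_relax();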
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
new file mode 100644
index 000000000000..11b8708fc4db
--- /dev/null
+++ b/arch/arm/include/asm/prom.h
@@ -0,0 +1,37 @@
1/*
2 * arch/arm/include/asm/prom.h
3 *
4 * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#ifndef __ASMARM_PROM_H
12#define __ASMARM_PROM_H
13
14#ifdef CONFIG_OF
15
16#include <asm/setup.h>
17#include <asm/irq.h>
18
19static inline void irq_dispose_mapping(unsigned int virq)
20{
21 return;
22}
23
24extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
25extern void arm_dt_memblock_reserve(void);
26
27#else /* CONFIG_OF */
28
29static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
30{
31 return NULL;
32}
33
34static inline void arm_dt_memblock_reserve(void) { }
35
36#endif /* CONFIG_OF */
 37#endif /* __ASMARM_PROM_H */
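asm/prom.h gives the ARM boot path a device-tree entry point with a graceful fallback. A hedged sketch of the expected flow in early setup code, using the real __atags_pointer boot variable:

struct machine_desc *mdesc;

mdesc = setup_machine_fdt(__atags_pointer);     /* probe for a DTB */
if (mdesc)
        arm_dt_memblock_reserve(); /* keep the DTB out of the allocator */
else
        ;       /* NULL (or the !CONFIG_OF stub): fall back to ATAGs */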
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 7ce15eb15f72..312d10877bd7 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -29,6 +29,8 @@
29#define PTRACE_SETCRUNCHREGS 26 29#define PTRACE_SETCRUNCHREGS 26
30#define PTRACE_GETVFPREGS 27 30#define PTRACE_GETVFPREGS 27
31#define PTRACE_SETVFPREGS 28 31#define PTRACE_SETVFPREGS 28
32#define PTRACE_GETHBPREGS 29
33#define PTRACE_SETHBPREGS 30
32 34
33/* 35/*
34 * PSR bits 36 * PSR bits
@@ -126,9 +128,13 @@ struct pt_regs {
126#define ARM_r0 uregs[0] 128#define ARM_r0 uregs[0]
127#define ARM_ORIG_r0 uregs[17] 129#define ARM_ORIG_r0 uregs[17]
128 130
129#ifdef __KERNEL__ 131/*
132 * The size of the user-visible VFP state as seen by PTRACE_GET/SETVFPREGS
133 * and core dumps.
134 */
135#define ARM_VFPREGS_SIZE ( 32 * 8 /*fpregs*/ + 4 /*fpscr*/ )
130 136
131#define arch_has_single_step() (1) 137#ifdef __KERNEL__
132 138
133#define user_mode(regs) \ 139#define user_mode(regs) \
134 (((regs)->ARM_cpsr & 0xf) == 0) 140 (((regs)->ARM_cpsr & 0xf) == 0)
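Moving ARM_VFPREGS_SIZE outside #ifdef __KERNEL__ makes the VFP register-file size part of the user-visible ABI. A hedged user-space sketch of the matching ptrace requests (the request constants, 27 and 28, come from this kernel header; pid is a traced, stopped child):

#include <sys/ptrace.h>

/* 32 double registers (32 * 8 bytes) plus the 32-bit FPSCR:
 * 260 bytes, exactly ARM_VFPREGS_SIZE. */
unsigned char vfpregs[32 * 8 + 4];

ptrace(PTRACE_GETVFPREGS, pid, NULL, vfpregs);
/* ... inspect or modify vfpregs in place ... */
ptrace(PTRACE_SETVFPREGS, pid, NULL, vfpregs);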
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
new file mode 100644
index 000000000000..c8e6ddf3e860
--- /dev/null
+++ b/arch/arm/include/asm/sched_clock.h
@@ -0,0 +1,120 @@
1/*
2 * sched_clock.h: support for extending counters to full 64-bit ns counter
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef ASM_SCHED_CLOCK
9#define ASM_SCHED_CLOCK
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13
14struct clock_data {
15 u64 epoch_ns;
16 u32 epoch_cyc;
17 u32 epoch_cyc_copy;
18 u32 mult;
19 u32 shift;
20};
21
22#define DEFINE_CLOCK_DATA(name) struct clock_data name
23
24static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
25{
26 return (cyc * mult) >> shift;
27}
28
29/*
30 * Atomically update the sched_clock epoch. Your update callback will
31 * be called from a timer before the counter wraps - read the current
32 * counter value, and call this function to safely move the epochs
33 * forward. Only use this from the update callback.
34 */
35static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
36{
37 unsigned long flags;
38 u64 ns = cd->epoch_ns +
39 cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
40
41 /*
42 * Write epoch_cyc and epoch_ns in a way that the update is
43 * detectable in cyc_to_fixed_sched_clock().
44 */
45 raw_local_irq_save(flags);
46 cd->epoch_cyc = cyc;
47 smp_wmb();
48 cd->epoch_ns = ns;
49 smp_wmb();
50 cd->epoch_cyc_copy = cyc;
51 raw_local_irq_restore(flags);
52}
53
54/*
55 * If your clock rate is known at compile time, using this will allow
56 * you to optimize the mult/shift loads away. This is paired with
57 * init_fixed_sched_clock() to ensure that your mult/shift are correct.
58 */
59static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
60 u32 cyc, u32 mask, u32 mult, u32 shift)
61{
62 u64 epoch_ns;
63 u32 epoch_cyc;
64
65 /*
66 * Load the epoch_cyc and epoch_ns atomically. We do this by
67 * ensuring that we always write epoch_cyc, epoch_ns and
68 * epoch_cyc_copy in strict order, and read them in strict order.
69 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
70 * the middle of an update, and we should repeat the load.
71 */
72 do {
73 epoch_cyc = cd->epoch_cyc;
74 smp_rmb();
75 epoch_ns = cd->epoch_ns;
76 smp_rmb();
77 } while (epoch_cyc != cd->epoch_cyc_copy);
78
79 return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
80}
81
82/*
83 * Otherwise, you need to use this, which will obtain the mult/shift
84 * from the clock_data structure. Use init_sched_clock() with this.
85 */
86static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
87 u32 cyc, u32 mask)
88{
89 return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
90}
91
92/*
93 * Initialize the clock data - calculate the appropriate multiplier
 94 * and shift. Also set up a timer to ensure that the epoch is refreshed
95 * at the appropriate time interval, which will call your update
96 * handler.
97 */
98void init_sched_clock(struct clock_data *, void (*)(void),
99 unsigned int, unsigned long);
100
101/*
102 * Use this initialization function rather than init_sched_clock() if
103 * you're using cyc_to_fixed_sched_clock, which will warn if your
104 * constants are incorrect.
105 */
106static inline void init_fixed_sched_clock(struct clock_data *cd,
107 void (*update)(void), unsigned int bits, unsigned long rate,
108 u32 mult, u32 shift)
109{
110 init_sched_clock(cd, update, bits, rate);
111 if (cd->mult != mult || cd->shift != shift) {
112 pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
113 "sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
114 mult, shift, cd->mult, cd->shift);
115 }
116}
117
118extern void sched_clock_postinit(void);
119
120#endif
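The header documents its own protocol: a timer-driven callback re-anchors the (epoch_ns, epoch_cyc) pair before the hardware counter wraps, and readers detect in-flight updates via epoch_cyc_copy. A hedged sketch of a platform driver wired to a free-running 32-bit counter; ctr_base, my_update_sched_clock and my_sched_clock_init are hypothetical names:

#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *ctr_base;          /* mapped 32-bit counter */
static DEFINE_CLOCK_DATA(cd);

unsigned long long notrace sched_clock(void)
{
        u32 cyc = readl(ctr_base);
        return cyc_to_sched_clock(&cd, cyc, 0xffffffff);
}

/* Called from the timer installed by init_sched_clock(), safely
 * before the counter wraps: re-anchor the epoch. */
static void notrace my_update_sched_clock(void)
{
        u32 cyc = readl(ctr_base);
        update_sched_clock(&cd, cyc, 0xffffffff);
}

static void __init my_sched_clock_init(void __iomem *base,
                                       unsigned long rate)
{
        ctr_base = base;
        init_sched_clock(&cd, my_update_sched_clock, 32, rate);
}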
diff --git a/arch/arm/include/asm/seccomp.h b/arch/arm/include/asm/seccomp.h
new file mode 100644
index 000000000000..52b156b341f5
--- /dev/null
+++ b/arch/arm/include/asm/seccomp.h
@@ -0,0 +1,11 @@
1#ifndef _ASM_ARM_SECCOMP_H
2#define _ASM_ARM_SECCOMP_H
3
4#include <linux/unistd.h>
5
6#define __NR_seccomp_read __NR_read
7#define __NR_seccomp_write __NR_write
8#define __NR_seccomp_exit __NR_exit
9#define __NR_seccomp_sigreturn __NR_rt_sigreturn
10
11#endif /* _ASM_ARM_SECCOMP_H */
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index f1e5a9bca249..ee2ad8ae07af 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -192,14 +192,10 @@ static struct tagtable __tagtable_##fn __tag = { tag, fn }
192/* 192/*
193 * Memory map description 193 * Memory map description
194 */ 194 */
195#ifdef CONFIG_ARCH_LH7A40X 195#define NR_BANKS 8
196# define NR_BANKS 16
197#else
198# define NR_BANKS 8
199#endif
200 196
201struct membank { 197struct membank {
202 unsigned long start; 198 phys_addr_t start;
203 unsigned long size; 199 unsigned long size;
204 unsigned int highmem; 200 unsigned int highmem;
205}; 201};
@@ -221,6 +217,10 @@ extern struct meminfo meminfo;
221#define bank_phys_end(bank) ((bank)->start + (bank)->size) 217#define bank_phys_end(bank) ((bank)->start + (bank)->size)
222#define bank_phys_size(bank) (bank)->size 218#define bank_phys_size(bank) (bank)->size
223 219
220extern int arm_add_memory(phys_addr_t start, unsigned long size);
221extern void early_print(const char *str, ...);
222extern void dump_machine_table(void);
223
224#endif /* __KERNEL__ */ 224#endif /* __KERNEL__ */
225 225
226#endif 226#endif
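With membank.start widened to phys_addr_t (for LPAE-capable platforms), printing and arithmetic should stop assuming unsigned long. A hedged sketch walking the bank array with the bank_phys_* helpers declared in this header:

/* Dump the memory layout; the casts keep the format portable
 * whether phys_addr_t is 32- or 64-bit. */
int i;

for (i = 0; i < meminfo.nr_banks; i++) {
        struct membank *b = &meminfo.bank[i];

        early_print("bank %d: %08llx-%08llx%s\n", i,
                    (unsigned long long)bank_phys_start(b),
                    (unsigned long long)bank_phys_end(b),
                    b->highmem ? " (highmem)" : "");
}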
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
index 4fc1565e4f93..154b89b81d3e 100644
--- a/arch/arm/include/asm/sizes.h
+++ b/arch/arm/include/asm/sizes.h
@@ -13,47 +13,9 @@
13 * along with this program; if not, write to the Free Software 13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 */ 15 */
16/* DO NOT EDIT!! - this file automatically generated
17 * from .s file by awk -f s2h.awk
18 */
19/* Size definitions 16/* Size definitions
20 * Copyright (C) ARM Limited 1998. All rights reserved. 17 * Copyright (C) ARM Limited 1998. All rights reserved.
21 */ 18 */
19#include <asm-generic/sizes.h>
22 20
23#ifndef __sizes_h 21#define SZ_48M (SZ_32M + SZ_16M)
24#define __sizes_h 1
25
26/* handy sizes */
27#define SZ_16 0x00000010
28#define SZ_256 0x00000100
29#define SZ_512 0x00000200
30
31#define SZ_1K 0x00000400
32#define SZ_2K 0x00000800
33#define SZ_4K 0x00001000
34#define SZ_8K 0x00002000
35#define SZ_16K 0x00004000
36#define SZ_32K 0x00008000
37#define SZ_64K 0x00010000
38#define SZ_128K 0x00020000
39#define SZ_256K 0x00040000
40#define SZ_512K 0x00080000
41
42#define SZ_1M 0x00100000
43#define SZ_2M 0x00200000
44#define SZ_4M 0x00400000
45#define SZ_8M 0x00800000
46#define SZ_16M 0x01000000
47#define SZ_32M 0x02000000
48#define SZ_48M 0x03000000
49#define SZ_64M 0x04000000
50#define SZ_128M 0x08000000
51#define SZ_256M 0x10000000
52#define SZ_512M 0x20000000
53
54#define SZ_1G 0x40000000
55#define SZ_2G 0x80000000
56
57#endif
58
59/* END */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 3d05190797cb..e42d96a45d3e 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -14,46 +14,34 @@
14#include <linux/cpumask.h> 14#include <linux/cpumask.h>
15#include <linux/thread_info.h> 15#include <linux/thread_info.h>
16 16
17#include <mach/smp.h>
18
19#ifndef CONFIG_SMP 17#ifndef CONFIG_SMP
20# error "<asm/smp.h> included in non-SMP build" 18# error "<asm/smp.h> included in non-SMP build"
21#endif 19#endif
22 20
23#define raw_smp_processor_id() (current_thread_info()->cpu) 21#define raw_smp_processor_id() (current_thread_info()->cpu)
24 22
25/*
26 * at the moment, there's not a big penalty for changing CPUs
27 * (the >big< penalty is running SMP in the first place)
28 */
29#define PROC_CHANGE_PENALTY 15
30
31struct seq_file; 23struct seq_file;
32 24
33/* 25/*
34 * generate IPI list text 26 * generate IPI list text
35 */ 27 */
36extern void show_ipi_list(struct seq_file *p); 28extern void show_ipi_list(struct seq_file *, int);
37 29
38/* 30/*
39 * Called from assembly code, this handles an IPI. 31 * Called from assembly code, this handles an IPI.
40 */ 32 */
41asmlinkage void do_IPI(struct pt_regs *regs); 33asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
42 34
43/* 35/*
44 * Setup the set of possible CPUs (via set_cpu_possible) 36 * Setup the set of possible CPUs (via set_cpu_possible)
45 */ 37 */
46extern void smp_init_cpus(void); 38extern void smp_init_cpus(void);
47 39
48/*
49 * Move global data into per-processor storage.
50 */
51extern void smp_store_cpu_info(unsigned int cpuid);
52 40
53/* 41/*
54 * Raise an IPI cross call on CPUs in callmap. 42 * Provide a function to raise an IPI cross call on CPUs in callmap.
55 */ 43 */
56extern void smp_cross_call(const struct cpumask *mask); 44extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
57 45
58/* 46/*
59 * Boot a secondary CPU, and assign it the specified idle task. 47 * Boot a secondary CPU, and assign it the specified idle task.
@@ -73,10 +61,16 @@ asmlinkage void secondary_start_kernel(void);
73extern void platform_secondary_init(unsigned int cpu); 61extern void platform_secondary_init(unsigned int cpu);
74 62
75/* 63/*
64 * Initialize cpu_possible map, and enable coherency
65 */
66extern void platform_smp_prepare_cpus(unsigned int);
67
68/*
76 * Initial data for bringing up a secondary CPU. 69 * Initial data for bringing up a secondary CPU.
77 */ 70 */
78struct secondary_data { 71struct secondary_data {
79 unsigned long pgdir; 72 unsigned long pgdir;
73 unsigned long swapper_pg_dir;
80 void *stack; 74 void *stack;
81}; 75};
82extern struct secondary_data secondary_data; 76extern struct secondary_data secondary_data;
@@ -97,6 +91,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
97/* 91/*
98 * show local interrupt info 92 * show local interrupt info
99 */ 93 */
100extern void show_local_irqs(struct seq_file *); 94extern void show_local_irqs(struct seq_file *, int);
101 95
102#endif /* ifndef __ASM_ARM_SMP_H */ 96#endif /* ifndef __ASM_ARM_SMP_H */
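smp_cross_call() as a hard mach-layer symbol is gone; platforms now hand the core a raise-IPI callback at run time, which is what allowed the <mach/smp.h> include to be dropped. A hedged sketch of a GIC-based platform (gic_raise_softirq matches the registered callback type):

#include <asm/smp.h>
#include <asm/hardware/gic.h>

void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
        /* ... enable coherency, mark CPUs possible, etc. ... */
        set_smp_cross_call(gic_raise_softirq);
}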
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index e6215305544a..f24c1b9e211d 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -7,15 +7,40 @@
7 7
8#include <asm/cputype.h> 8#include <asm/cputype.h>
9 9
10/*
 11 * Return true if we are running on an SMP platform
12 */
13static inline bool is_smp(void)
14{
15#ifndef CONFIG_SMP
16 return false;
17#elif defined(CONFIG_SMP_ON_UP)
18 extern unsigned int smp_on_up;
19 return !!smp_on_up;
20#else
21 return true;
22#endif
23}
24
10/* all SMP configurations have the extended CPUID registers */ 25/* all SMP configurations have the extended CPUID registers */
11static inline int tlb_ops_need_broadcast(void) 26static inline int tlb_ops_need_broadcast(void)
12{ 27{
28 if (!is_smp())
29 return 0;
30
13 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; 31 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
14} 32}
15 33
34#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
35#define cache_ops_need_broadcast() 0
36#else
16static inline int cache_ops_need_broadcast(void) 37static inline int cache_ops_need_broadcast(void)
17{ 38{
39 if (!is_smp())
40 return 0;
41
18 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1; 42 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
19} 43}
44#endif
20 45
21#endif 46#endif
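is_smp() makes the broadcast predicates safe for SMP_ON_UP kernels: booted on a single core, both return 0 and only local maintenance runs. A hedged sketch of the run-time selection (ipi_flush_tlb_all is an assumed helper name):

/* Choose TLB maintenance scope at run time. */
static void flush_all_tlbs(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1); /* name assumed */
        else
                local_flush_tlb_all();
}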
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 2376835015d6..4eb6d005ffaa 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -1,7 +1,14 @@
1#ifndef __ASMARM_ARCH_SCU_H 1#ifndef __ASMARM_ARCH_SCU_H
2#define __ASMARM_ARCH_SCU_H 2#define __ASMARM_ARCH_SCU_H
3 3
4#define SCU_PM_NORMAL 0
5#define SCU_PM_DORMANT 2
6#define SCU_PM_POWEROFF 3
7
8#ifndef __ASSEMBLER__
4unsigned int scu_get_core_count(void __iomem *); 9unsigned int scu_get_core_count(void __iomem *);
5void scu_enable(void __iomem *); 10void scu_enable(void __iomem *);
11int scu_power_mode(void __iomem *, unsigned int);
12#endif
6 13
7#endif 14#endif
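scu_power_mode() plus the SCU_PM_* states give hotplug code a way to park a core. A hedged sketch of a cpu_die-style path; scu_base is the platform's mapped SCU:

/* Tell the SCU this core is going away, then wait for power to
 * actually drop; wfi() is the ARMv6K+ wait-for-interrupt hint. */
scu_power_mode(scu_base, SCU_PM_POWEROFF);
for (;;)
        wfi();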
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 634f357be6bb..fed9981fba08 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -22,7 +22,6 @@ struct clock_event_device;
22 22
23extern void __iomem *twd_base; 23extern void __iomem *twd_base;
24 24
25void twd_timer_stop(void);
26int twd_timer_ack(void); 25int twd_timer_ack(void);
27void twd_timer_setup(struct clock_event_device *); 26void twd_timer_setup(struct clock_event_device *);
28 27
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 17eb355707dd..65fa3c88095c 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,17 +5,54 @@
5#error SMP not supported on pre-ARMv6 CPUs 5#error SMP not supported on pre-ARMv6 CPUs
6#endif 6#endif
7 7
8#include <asm/processor.h>
9
10/*
11 * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
12 * extensions, so when running on UP, we have to patch these instructions away.
13 */
14#define ALT_SMP(smp, up) \
15 "9998: " smp "\n" \
16 " .pushsection \".alt.smp.init\", \"a\"\n" \
17 " .long 9998b\n" \
18 " " up "\n" \
19 " .popsection\n"
20
21#ifdef CONFIG_THUMB2_KERNEL
22#define SEV ALT_SMP("sev.w", "nop.w")
23/*
24 * For Thumb-2, special care is needed to ensure that the conditional WFE
25 * instruction really does assemble to exactly 4 bytes (as required by
26 * the SMP_ON_UP fixup code). By itself "wfene" might cause the
 27 * assembler to insert an extra (16-bit) IT instruction, depending on the
28 * presence or absence of neighbouring conditional instructions.
29 *
 30 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
31 * the assembler won't change IT instructions which are explicitly present
32 * in the input.
33 */
34#define WFE(cond) ALT_SMP( \
35 "it " cond "\n\t" \
36 "wfe" cond ".n", \
37 \
38 "nop.w" \
39)
40#else
41#define SEV ALT_SMP("sev", "nop")
42#define WFE(cond) ALT_SMP("wfe" cond, "nop")
43#endif
44
8static inline void dsb_sev(void) 45static inline void dsb_sev(void)
9{ 46{
10#if __LINUX_ARM_ARCH__ >= 7 47#if __LINUX_ARM_ARCH__ >= 7
11 __asm__ __volatile__ ( 48 __asm__ __volatile__ (
12 "dsb\n" 49 "dsb\n"
13 "sev" 50 SEV
14 ); 51 );
15#elif defined(CONFIG_CPU_32v6K) 52#else
16 __asm__ __volatile__ ( 53 __asm__ __volatile__ (
17 "mcr p15, 0, %0, c7, c10, 4\n" 54 "mcr p15, 0, %0, c7, c10, 4\n"
18 "sev" 55 SEV
19 : : "r" (0) 56 : : "r" (0)
20 ); 57 );
21#endif 58#endif
@@ -46,9 +83,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
46 __asm__ __volatile__( 83 __asm__ __volatile__(
47"1: ldrex %0, [%1]\n" 84"1: ldrex %0, [%1]\n"
48" teq %0, #0\n" 85" teq %0, #0\n"
49#ifdef CONFIG_CPU_32v6K 86 WFE("ne")
50" wfene\n"
51#endif
52" strexeq %0, %2, [%1]\n" 87" strexeq %0, %2, [%1]\n"
53" teqeq %0, #0\n" 88" teqeq %0, #0\n"
54" bne 1b" 89" bne 1b"
@@ -107,9 +142,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
107 __asm__ __volatile__( 142 __asm__ __volatile__(
108"1: ldrex %0, [%1]\n" 143"1: ldrex %0, [%1]\n"
109" teq %0, #0\n" 144" teq %0, #0\n"
110#ifdef CONFIG_CPU_32v6K 145 WFE("ne")
111" wfene\n"
112#endif
113" strexeq %0, %2, [%1]\n" 146" strexeq %0, %2, [%1]\n"
114" teq %0, #0\n" 147" teq %0, #0\n"
115" bne 1b" 148" bne 1b"
@@ -176,9 +209,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
176"1: ldrex %0, [%2]\n" 209"1: ldrex %0, [%2]\n"
177" adds %0, %0, #1\n" 210" adds %0, %0, #1\n"
178" strexpl %1, %0, [%2]\n" 211" strexpl %1, %0, [%2]\n"
179#ifdef CONFIG_CPU_32v6K 212 WFE("mi")
180" wfemi\n"
181#endif
182" rsbpls %0, %1, #0\n" 213" rsbpls %0, %1, #0\n"
183" bmi 1b" 214" bmi 1b"
184 : "=&r" (tmp), "=&r" (tmp2) 215 : "=&r" (tmp), "=&r" (tmp2)
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 8ba1ccf82a02..832888d0c20c 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -63,6 +63,11 @@
63#include <asm/outercache.h> 63#include <asm/outercache.h>
64 64
65#define __exception __attribute__((section(".exception.text"))) 65#define __exception __attribute__((section(".exception.text")))
66#ifdef CONFIG_FUNCTION_GRAPH_TRACER
67#define __exception_irq_entry __irq_entry
68#else
69#define __exception_irq_entry __exception
70#endif
66 71
67struct thread_info; 72struct thread_info;
68struct task_struct; 73struct task_struct;
@@ -85,6 +90,10 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
85 struct pt_regs *), 90 struct pt_regs *),
86 int sig, int code, const char *name); 91 int sig, int code, const char *name);
87 92
93void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
94 struct pt_regs *),
95 int sig, int code, const char *name);
96
88#define xchg(ptr,x) \ 97#define xchg(ptr,x) \
89 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 98 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
90 99
@@ -115,6 +124,13 @@ extern unsigned int user_debug;
115#define vectors_high() (0) 124#define vectors_high() (0)
116#endif 125#endif
117 126
127#if __LINUX_ARM_ARCH__ >= 7 || \
128 (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
129#define sev() __asm__ __volatile__ ("sev" : : : "memory")
130#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
131#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
132#endif
133
118#if __LINUX_ARM_ARCH__ >= 7 134#if __LINUX_ARM_ARCH__ >= 7
119#define isb() __asm__ __volatile__ ("isb" : : : "memory") 135#define isb() __asm__ __volatile__ ("isb" : : : "memory")
120#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") 136#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
@@ -143,9 +159,10 @@ extern unsigned int user_debug;
143#include <mach/barriers.h> 159#include <mach/barriers.h>
144#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) 160#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
145#define mb() do { dsb(); outer_sync(); } while (0) 161#define mb() do { dsb(); outer_sync(); } while (0)
146#define rmb() dmb() 162#define rmb() dsb()
147#define wmb() mb() 163#define wmb() mb()
148#else 164#else
165#include <asm/memory.h>
149#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 166#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
150#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 167#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
151#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 168#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -232,7 +249,7 @@ do { \
232 * cache totally. This means that the cache becomes inconsistent, and, 249 * cache totally. This means that the cache becomes inconsistent, and,
233 * since we use normal loads/stores as well, this is really bad. 250 * since we use normal loads/stores as well, this is really bad.
234 * Typically, this causes oopsen in filp_close, but could have other, 251 * Typically, this causes oopsen in filp_close, but could have other,
235 * more disasterous effects. There are two work-arounds: 252 * more disastrous effects. There are two work-arounds:
236 * 1. Disable interrupts and emulate the atomic swap 253 * 1. Disable interrupts and emulate the atomic swap
237 * 2. Clean the cache, perform atomic swap, flush the cache 254 * 2. Clean the cache, perform atomic swap, flush the cache
238 * 255 *
@@ -325,9 +342,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
325extern void disable_hlt(void); 342extern void disable_hlt(void);
326extern void enable_hlt(void); 343extern void enable_hlt(void);
327 344
345void cpu_idle_wait(void);
346
328#include <asm-generic/cmpxchg-local.h> 347#include <asm-generic/cmpxchg-local.h>
329 348
330#if __LINUX_ARM_ARCH__ < 6 349#if __LINUX_ARM_ARCH__ < 6
350/* min ARCH < ARMv6 */
331 351
332#ifdef CONFIG_SMP 352#ifdef CONFIG_SMP
333#error "SMP is not supported on this platform" 353#error "SMP is not supported on this platform"
@@ -346,7 +366,7 @@ extern void enable_hlt(void);
346#include <asm-generic/cmpxchg.h> 366#include <asm-generic/cmpxchg.h>
347#endif 367#endif
348 368
349#else /* __LINUX_ARM_ARCH__ >= 6 */ 369#else /* min ARCH >= ARMv6 */
350 370
351extern void __bad_cmpxchg(volatile void *ptr, int size); 371extern void __bad_cmpxchg(volatile void *ptr, int size);
352 372
@@ -360,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
360 unsigned long oldval, res; 380 unsigned long oldval, res;
361 381
362 switch (size) { 382 switch (size) {
363#ifdef CONFIG_CPU_32v6K 383#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
364 case 1: 384 case 1:
365 do { 385 do {
366 asm volatile("@ __cmpxchg1\n" 386 asm volatile("@ __cmpxchg1\n"
@@ -385,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
385 : "memory", "cc"); 405 : "memory", "cc");
386 } while (res); 406 } while (res);
387 break; 407 break;
388#endif /* CONFIG_CPU_32v6K */ 408#endif
389 case 4: 409 case 4:
390 do { 410 do {
391 asm volatile("@ __cmpxchg4\n" 411 asm volatile("@ __cmpxchg4\n"
@@ -431,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
431 unsigned long ret; 451 unsigned long ret;
432 452
433 switch (size) { 453 switch (size) {
434#ifndef CONFIG_CPU_32v6K 454#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
435 case 1: 455 case 1:
436 case 2: 456 case 2:
437 ret = __cmpxchg_local_generic(ptr, old, new, size); 457 ret = __cmpxchg_local_generic(ptr, old, new, size);
438 break; 458 break;
439#endif /* !CONFIG_CPU_32v6K */ 459#endif
440 default: 460 default:
441 ret = __cmpxchg(ptr, old, new, size); 461 ret = __cmpxchg(ptr, old, new, size);
442 } 462 }
@@ -450,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
450 (unsigned long)(n), \ 470 (unsigned long)(n), \
451 sizeof(*(ptr)))) 471 sizeof(*(ptr))))
452 472
453#ifdef CONFIG_CPU_32v6K 473#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
454 474
455/* 475/*
456 * Note : ARMv7-M (currently unsupported by Linux) does not support 476 * Note : ARMv7-M (currently unsupported by Linux) does not support
@@ -505,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
505 (unsigned long long)(o), \ 525 (unsigned long long)(o), \
506 (unsigned long long)(n))) 526 (unsigned long long)(n)))
507 527
508#else /* !CONFIG_CPU_32v6K */ 528#else /* min ARCH = ARMv6 */
509 529
510#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 530#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
511 531
512#endif /* CONFIG_CPU_32v6K */ 532#endif
513 533
514#endif /* __LINUX_ARM_ARCH__ >= 6 */ 534#endif /* __LINUX_ARM_ARCH__ >= 6 */
515 535
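The CONFIG_CPU_32v6K tests become CONFIG_CPU_V6 tests: byte and halfword cmpxchg are native whenever plain ARMv6 cannot be in the build ("min ARCH >= ARMv6K"). A hedged example of using the resulting API from generic code:

/* Lock-free bounded increment on top of cmpxchg(); the int case
 * compiles to an ldrex/strex retry loop on ARMv6 and later. */
static inline int bounded_inc(int *v, int max)
{
        int old, new;

        do {
                old = *v;
                if (old >= max)
                        return old;     /* already at the bound */
                new = old + 1;
        } while (cmpxchg(v, old, new) != old);

        return new;
}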
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 763e29fa8530..7b5cc8dae06e 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -144,6 +144,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
144#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 144#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
145#define TIF_FREEZE 19 145#define TIF_FREEZE 19
146#define TIF_RESTORE_SIGMASK 20 146#define TIF_RESTORE_SIGMASK 20
147#define TIF_SECCOMP 21
147 148
148#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 149#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
149#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 150#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -153,6 +154,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
153#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) 154#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
154#define _TIF_FREEZE (1 << TIF_FREEZE) 155#define _TIF_FREEZE (1 << TIF_FREEZE)
155#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 156#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
157#define _TIF_SECCOMP (1 << TIF_SECCOMP)
156 158
157/* 159/*
158 * Change these and you break ASM code in entry-common.S 160 * Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
index c4391ba20350..1dc980675894 100644
--- a/arch/arm/include/asm/thread_notify.h
+++ b/arch/arm/include/asm/thread_notify.h
@@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)
43#define THREAD_NOTIFY_FLUSH 0 43#define THREAD_NOTIFY_FLUSH 0
44#define THREAD_NOTIFY_EXIT 1 44#define THREAD_NOTIFY_EXIT 1
45#define THREAD_NOTIFY_SWITCH 2 45#define THREAD_NOTIFY_SWITCH 2
46#define THREAD_NOTIFY_COPY 3
46 47
47#endif 48#endif
48#endif 49#endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f41a6f57cd12..265f908c4a6e 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -18,16 +18,34 @@
18#define __ASMARM_TLB_H 18#define __ASMARM_TLB_H
19 19
20#include <asm/cacheflush.h> 20#include <asm/cacheflush.h>
21#include <asm/tlbflush.h>
22 21
23#ifndef CONFIG_MMU 22#ifndef CONFIG_MMU
24 23
25#include <linux/pagemap.h> 24#include <linux/pagemap.h>
25
26#define tlb_flush(tlb) ((void) tlb)
27
26#include <asm-generic/tlb.h> 28#include <asm-generic/tlb.h>
27 29
28#else /* !CONFIG_MMU */ 30#else /* !CONFIG_MMU */
29 31
32#include <linux/swap.h>
30#include <asm/pgalloc.h> 33#include <asm/pgalloc.h>
34#include <asm/tlbflush.h>
35
36/*
37 * We need to delay page freeing for SMP as other CPUs can access pages
38 * which have been removed but not yet had their TLB entries invalidated.
39 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
40 * we need to apply this same delaying tactic to ensure correct operation.
41 */
42#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
43#define tlb_fast_mode(tlb) 0
44#else
45#define tlb_fast_mode(tlb) 1
46#endif
47
48#define MMU_GATHER_BUNDLE 8
31 49
32/* 50/*
33 * TLB handling. This allows us to remove pages from the page 51 * TLB handling. This allows us to remove pages from the page
@@ -36,33 +54,94 @@
36struct mmu_gather { 54struct mmu_gather {
37 struct mm_struct *mm; 55 struct mm_struct *mm;
38 unsigned int fullmm; 56 unsigned int fullmm;
57 struct vm_area_struct *vma;
39 unsigned long range_start; 58 unsigned long range_start;
40 unsigned long range_end; 59 unsigned long range_end;
60 unsigned int nr;
61 unsigned int max;
62 struct page **pages;
63 struct page *local[MMU_GATHER_BUNDLE];
41}; 64};
42 65
43DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 66DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
44 67
45static inline struct mmu_gather * 68/*
 46tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) 69 * This is unnecessarily complex. There are three ways the TLB shootdown
70 * code is used:
71 * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
72 * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
73 * tlb->vma will be non-NULL.
74 * 2. Unmapping all vmas. See exit_mmap().
75 * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
76 * tlb->vma will be non-NULL. Additionally, page tables will be freed.
77 * 3. Unmapping argument pages. See shift_arg_pages().
78 * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
79 * tlb->vma will be NULL.
80 */
81static inline void tlb_flush(struct mmu_gather *tlb)
82{
83 if (tlb->fullmm || !tlb->vma)
84 flush_tlb_mm(tlb->mm);
85 else if (tlb->range_end > 0) {
86 flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
87 tlb->range_start = TASK_SIZE;
88 tlb->range_end = 0;
89 }
90}
91
92static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
47{ 93{
48 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); 94 if (!tlb->fullmm) {
95 if (addr < tlb->range_start)
96 tlb->range_start = addr;
97 if (addr + PAGE_SIZE > tlb->range_end)
98 tlb->range_end = addr + PAGE_SIZE;
99 }
100}
49 101
50 tlb->mm = mm; 102static inline void __tlb_alloc_page(struct mmu_gather *tlb)
51 tlb->fullmm = full_mm_flush; 103{
104 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
52 105
53 return tlb; 106 if (addr) {
107 tlb->pages = (void *)addr;
108 tlb->max = PAGE_SIZE / sizeof(struct page *);
109 }
110}
111
112static inline void tlb_flush_mmu(struct mmu_gather *tlb)
113{
114 tlb_flush(tlb);
115 if (!tlb_fast_mode(tlb)) {
116 free_pages_and_swap_cache(tlb->pages, tlb->nr);
117 tlb->nr = 0;
118 if (tlb->pages == tlb->local)
119 __tlb_alloc_page(tlb);
120 }
121}
122
123static inline void
124tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
125{
126 tlb->mm = mm;
127 tlb->fullmm = fullmm;
128 tlb->vma = NULL;
129 tlb->max = ARRAY_SIZE(tlb->local);
130 tlb->pages = tlb->local;
131 tlb->nr = 0;
132 __tlb_alloc_page(tlb);
54} 133}
55 134
56static inline void 135static inline void
57tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 136tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
58{ 137{
59 if (tlb->fullmm) 138 tlb_flush_mmu(tlb);
60 flush_tlb_mm(tlb->mm);
61 139
62 /* keep the page table cache within bounds */ 140 /* keep the page table cache within bounds */
63 check_pgt_cache(); 141 check_pgt_cache();
64 142
65 put_cpu_var(mmu_gathers); 143 if (tlb->pages != tlb->local)
144 free_pages((unsigned long)tlb->pages, 0);
66} 145}
67 146
68/* 147/*
@@ -71,12 +150,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
71static inline void 150static inline void
72tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) 151tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
73{ 152{
74 if (!tlb->fullmm) { 153 tlb_add_flush(tlb, addr);
75 if (addr < tlb->range_start)
76 tlb->range_start = addr;
77 if (addr + PAGE_SIZE > tlb->range_end)
78 tlb->range_end = addr + PAGE_SIZE;
79 }
80} 154}
81 155
82/* 156/*
@@ -89,6 +163,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
89{ 163{
90 if (!tlb->fullmm) { 164 if (!tlb->fullmm) {
91 flush_cache_range(vma, vma->vm_start, vma->vm_end); 165 flush_cache_range(vma, vma->vm_start, vma->vm_end);
166 tlb->vma = vma;
92 tlb->range_start = TASK_SIZE; 167 tlb->range_start = TASK_SIZE;
93 tlb->range_end = 0; 168 tlb->range_end = 0;
94 } 169 }
@@ -97,12 +172,37 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
97static inline void 172static inline void
98tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) 173tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
99{ 174{
100 if (!tlb->fullmm && tlb->range_end > 0) 175 if (!tlb->fullmm)
101 flush_tlb_range(vma, tlb->range_start, tlb->range_end); 176 tlb_flush(tlb);
177}
178
179static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
180{
181 if (tlb_fast_mode(tlb)) {
182 free_page_and_swap_cache(page);
183 return 1; /* avoid calling tlb_flush_mmu */
184 }
185
186 tlb->pages[tlb->nr++] = page;
187 VM_BUG_ON(tlb->nr > tlb->max);
188 return tlb->max - tlb->nr;
189}
190
191static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
192{
193 if (!__tlb_remove_page(tlb, page))
194 tlb_flush_mmu(tlb);
195}
196
197static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
198 unsigned long addr)
199{
200 pgtable_page_dtor(pte);
201 tlb_add_flush(tlb, addr);
202 tlb_remove_page(tlb, pte);
102} 203}
103 204
104#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) 205#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
105#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
106#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) 206#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
107 207
108#define tlb_migrate_finish(mm) do { } while (0) 208#define tlb_migrate_finish(mm) do { } while (0)
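The rewritten mmu_gather batches pages (up to tlb->max per round) instead of freeing them immediately, which is what the delayed-free comment for SMP/ARMv7 above requires. A hedged outline of the caller protocol, following case 1 of the three-case comment (mm, vma, ptep, addr, page, start and end assumed from context; compare zap_page_range()):

struct mmu_gather tlb;

tlb_gather_mmu(&tlb, mm, 0);             /* partial unmap: fullmm = 0 */
tlb_start_vma(&tlb, vma);                /* flush caches, record vma */
/* for each cleared pte: */
tlb_remove_tlb_entry(&tlb, ptep, addr);  /* widen the flush range */
tlb_remove_page(&tlb, page);             /* batch; auto-flush when full */
tlb_end_vma(&tlb, vma);                  /* range-flush this vma */
tlb_finish_mmu(&tlb, start, end);        /* final flush, free the batch */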
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 33b546ae72d4..d2005de383b8 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -10,12 +10,7 @@
10#ifndef _ASMARM_TLBFLUSH_H 10#ifndef _ASMARM_TLBFLUSH_H
11#define _ASMARM_TLBFLUSH_H 11#define _ASMARM_TLBFLUSH_H
12 12
13 13#ifdef CONFIG_MMU
14#ifndef CONFIG_MMU
15
16#define tlb_flush(tlb) ((void) tlb)
17
18#else /* CONFIG_MMU */
19 14
20#include <asm/glue.h> 15#include <asm/glue.h>
21 16
@@ -70,6 +65,10 @@
70#undef _TLB 65#undef _TLB
71#undef MULTI_TLB 66#undef MULTI_TLB
72 67
68#ifdef CONFIG_SMP_ON_UP
69#define MULTI_TLB 1
70#endif
71
73#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE) 72#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
74 73
75#ifdef CONFIG_CPU_TLB_V3 74#ifdef CONFIG_CPU_TLB_V3
@@ -185,17 +184,23 @@
185# define v6wbi_always_flags (-1UL) 184# define v6wbi_always_flags (-1UL)
186#endif 185#endif
187 186
188#ifdef CONFIG_SMP 187#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
189#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
190 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) 188 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
191#else 189#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \
192#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
193 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) 190 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
194#endif
195 191
196#ifdef CONFIG_CPU_TLB_V7 192#ifdef CONFIG_CPU_TLB_V7
197# define v7wbi_possible_flags v7wbi_tlb_flags 193
198# define v7wbi_always_flags v7wbi_tlb_flags 194# ifdef CONFIG_SMP_ON_UP
195# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
196# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
197# elif defined(CONFIG_SMP)
198# define v7wbi_possible_flags v7wbi_tlb_flags_smp
199# define v7wbi_always_flags v7wbi_tlb_flags_smp
200# else
201# define v7wbi_possible_flags v7wbi_tlb_flags_up
202# define v7wbi_always_flags v7wbi_tlb_flags_up
203# endif
199# ifdef _TLB 204# ifdef _TLB
200# define MULTI_TLB 1 205# define MULTI_TLB 1
201# else 206# else
@@ -560,12 +565,20 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
560#endif 565#endif
561 566
562/* 567/*
563 * if PG_dcache_dirty is set for the page, we need to ensure that any 568 * If PG_dcache_clean is not set for the page, we need to ensure that any
 564 * cache entries for the kernel's virtual memory range are written 569 * cache entries for the kernel's virtual memory range are written
565 * back to the page. 570 * back to the page. On ARMv6 and later, the cache coherency is handled via
571 * the set_pte_at() function.
566 */ 572 */
573#if __LINUX_ARM_ARCH__ < 6
567extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, 574extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
568 pte_t *ptep); 575 pte_t *ptep);
576#else
577static inline void update_mmu_cache(struct vm_area_struct *vma,
578 unsigned long addr, pte_t *ptep)
579{
580}
581#endif
569 582
570#endif 583#endif
571 584
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index e71d6ff8d104..60843eb0f61c 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -28,15 +28,14 @@
28#define tls_emu 1 28#define tls_emu 1
29#define has_tls_reg 1 29#define has_tls_reg 1
30#define set_tls set_tls_none 30#define set_tls set_tls_none
31#elif __LINUX_ARM_ARCH__ >= 7 || \ 31#elif defined(CONFIG_CPU_V6)
32 (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
33#define tls_emu 0
34#define has_tls_reg 1
35#define set_tls set_tls_v6k
36#elif __LINUX_ARM_ARCH__ == 6
37#define tls_emu 0 32#define tls_emu 0
38#define has_tls_reg (elf_hwcap & HWCAP_TLS) 33#define has_tls_reg (elf_hwcap & HWCAP_TLS)
39#define set_tls set_tls_v6 34#define set_tls set_tls_v6
35#elif defined(CONFIG_CPU_32v6K)
36#define tls_emu 0
37#define has_tls_reg 1
38#define set_tls set_tls_v6k
40#else 39#else
41#define tls_emu 0 40#define tls_emu 0
42#define has_tls_reg 0 41#define has_tls_reg 0
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 491960bf4260..f90756dc16dc 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -15,16 +15,38 @@ struct undef_hook {
15void register_undef_hook(struct undef_hook *hook); 15void register_undef_hook(struct undef_hook *hook);
16void unregister_undef_hook(struct undef_hook *hook); 16void unregister_undef_hook(struct undef_hook *hook);
17 17
18#ifdef CONFIG_FUNCTION_GRAPH_TRACER
19static inline int __in_irqentry_text(unsigned long ptr)
20{
21 extern char __irqentry_text_start[];
22 extern char __irqentry_text_end[];
23
24 return ptr >= (unsigned long)&__irqentry_text_start &&
25 ptr < (unsigned long)&__irqentry_text_end;
26}
27#else
28static inline int __in_irqentry_text(unsigned long ptr)
29{
30 return 0;
31}
32#endif
33
18static inline int in_exception_text(unsigned long ptr) 34static inline int in_exception_text(unsigned long ptr)
19{ 35{
20 extern char __exception_text_start[]; 36 extern char __exception_text_start[];
21 extern char __exception_text_end[]; 37 extern char __exception_text_end[];
38 int in;
22 39
23 return ptr >= (unsigned long)&__exception_text_start && 40 in = ptr >= (unsigned long)&__exception_text_start &&
24 ptr < (unsigned long)&__exception_text_end; 41 ptr < (unsigned long)&__exception_text_end;
42
43 return in ? : __in_irqentry_text(ptr);
25} 44}
26 45
27extern void __init early_trap_init(void); 46extern void __init early_trap_init(void);
28extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); 47extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
48extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
49
50extern void *vectors_page;
29 51
30#endif 52#endif
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
index 345df01534a4..48192ac3a23a 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/asm/types.h
@@ -16,15 +16,6 @@ typedef unsigned short umode_t;
16 16
17#define BITS_PER_LONG 32 17#define BITS_PER_LONG 32
18 18
19#ifndef __ASSEMBLY__
20
21/* Dma addresses are 32-bits wide. */
22
23typedef u32 dma_addr_t;
24typedef u32 dma64_addr_t;
25
26#endif /* __ASSEMBLY__ */
27
28#endif /* __KERNEL__ */ 19#endif /* __KERNEL__ */
29 20
30#endif 21#endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 33e4a48fe103..b293616a1a1a 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -227,7 +227,7 @@ do { \
227 227
228#define __get_user_asm_byte(x,addr,err) \ 228#define __get_user_asm_byte(x,addr,err) \
229 __asm__ __volatile__( \ 229 __asm__ __volatile__( \
230 "1: ldrbt %1,[%2]\n" \ 230 "1: " T(ldrb) " %1,[%2],#0\n" \
231 "2:\n" \ 231 "2:\n" \
232 " .pushsection .fixup,\"ax\"\n" \ 232 " .pushsection .fixup,\"ax\"\n" \
233 " .align 2\n" \ 233 " .align 2\n" \
@@ -263,7 +263,7 @@ do { \
263 263
264#define __get_user_asm_word(x,addr,err) \ 264#define __get_user_asm_word(x,addr,err) \
265 __asm__ __volatile__( \ 265 __asm__ __volatile__( \
266 "1: ldrt %1,[%2]\n" \ 266 "1: " T(ldr) " %1,[%2],#0\n" \
267 "2:\n" \ 267 "2:\n" \
268 " .pushsection .fixup,\"ax\"\n" \ 268 " .pushsection .fixup,\"ax\"\n" \
269 " .align 2\n" \ 269 " .align 2\n" \
@@ -308,7 +308,7 @@ do { \
308 308
309#define __put_user_asm_byte(x,__pu_addr,err) \ 309#define __put_user_asm_byte(x,__pu_addr,err) \
310 __asm__ __volatile__( \ 310 __asm__ __volatile__( \
311 "1: strbt %1,[%2]\n" \ 311 "1: " T(strb) " %1,[%2],#0\n" \
312 "2:\n" \ 312 "2:\n" \
313 " .pushsection .fixup,\"ax\"\n" \ 313 " .pushsection .fixup,\"ax\"\n" \
314 " .align 2\n" \ 314 " .align 2\n" \
@@ -341,7 +341,7 @@ do { \
341 341
342#define __put_user_asm_word(x,__pu_addr,err) \ 342#define __put_user_asm_word(x,__pu_addr,err) \
343 __asm__ __volatile__( \ 343 __asm__ __volatile__( \
344 "1: strt %1,[%2]\n" \ 344 "1: " T(str) " %1,[%2],#0\n" \
345 "2:\n" \ 345 "2:\n" \
346 " .pushsection .fixup,\"ax\"\n" \ 346 " .pushsection .fixup,\"ax\"\n" \
347 " .align 2\n" \ 347 " .align 2\n" \
@@ -366,10 +366,10 @@ do { \
366 366
367#define __put_user_asm_dword(x,__pu_addr,err) \ 367#define __put_user_asm_dword(x,__pu_addr,err) \
368 __asm__ __volatile__( \ 368 __asm__ __volatile__( \
369 ARM( "1: strt " __reg_oper1 ", [%1], #4\n" ) \ 369 ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \
370 ARM( "2: strt " __reg_oper0 ", [%1]\n" ) \ 370 ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \
371 THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \ 371 THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \
372 THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \ 372 THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \
373 "3:\n" \ 373 "3:\n" \
374 " .pushsection .fixup,\"ax\"\n" \ 374 " .pushsection .fixup,\"ax\"\n" \
375 " .align 2\n" \ 375 " .align 2\n" \
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index 47f023aa8495..14749aec94bf 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -47,7 +47,7 @@ struct crunch_sigframe {
47#endif 47#endif
48 48
49#ifdef CONFIG_IWMMXT 49#ifdef CONFIG_IWMMXT
50/* iwmmxt_area is 0x98 bytes long, preceeded by 8 bytes of signature */ 50/* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */
51#define IWMMXT_MAGIC 0x12ef842a 51#define IWMMXT_MAGIC 0x12ef842a
52#define IWMMXT_STORAGE_SIZE (IWMMXT_SIZE + 8) 52#define IWMMXT_STORAGE_SIZE (IWMMXT_SIZE + 8)
53 53
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 625b30490624..110d113d4afb 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -396,6 +396,12 @@
396#define __NR_fanotify_init (__NR_SYSCALL_BASE+367) 396#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
397#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) 397#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
398#define __NR_prlimit64 (__NR_SYSCALL_BASE+369) 398#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
399#define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370)
400#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371)
401#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372)
402#define __NR_syncfs (__NR_SYSCALL_BASE+373)
403#define __NR_sendmmsg (__NR_SYSCALL_BASE+374)
404#define __NR_setns (__NR_SYSCALL_BASE+375)
399 405
400#define __NR_LITMUS (__NR_SYSCALL_BASE+370) 406#define __NR_LITMUS (__NR_SYSCALL_BASE+370)
401#include <litmus/unistd_32.h> 407#include <litmus/unistd_32.h>
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index 05ac4b06876a..35917b3a97f9 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -71,7 +71,7 @@ struct user{
71 /* the registers. */ 71 /* the registers. */
72 unsigned long magic; /* To uniquely identify a core file */ 72 unsigned long magic; /* To uniquely identify a core file */
73 char u_comm[32]; /* User command that was responsible */ 73 char u_comm[32]; /* User command that was responsible */
74 int u_debugreg[8]; 74 int u_debugreg[8]; /* No longer used */
75 struct user_fp u_fp; /* FP state */ 75 struct user_fp u_fp; /* FP state */
76 struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */ 76 struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
77 /* the FP registers. */ 77 /* the FP registers. */