Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/atomic.h     | 132
-rw-r--r--  arch/arm/include/asm/hwcap.h      |   1
-rw-r--r--  arch/arm/include/asm/irq.h        |   2
-rw-r--r--  arch/arm/include/asm/kexec.h      |  22
-rw-r--r--  arch/arm/include/asm/mach/arch.h  |   2
-rw-r--r--  arch/arm/include/asm/mach/irq.h   |   1
-rw-r--r--  arch/arm/include/asm/memblock.h   |  16
-rw-r--r--  arch/arm/include/asm/memory.h     |  67
-rw-r--r--  arch/arm/include/asm/mmzone.h     |  30
-rw-r--r--  arch/arm/include/asm/ptrace.h     |  36
-rw-r--r--  arch/arm/include/asm/setup.h      |   8
-rw-r--r--  arch/arm/include/asm/tls.h        |  46
-rw-r--r--  arch/arm/include/asm/vfpmacros.h  |  18
13 files changed, 211 insertions(+), 170 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index a0162fa94564..7e79503ab89b 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
@@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_add_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 }
@@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
@@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
-		"ldrex	%1, [%2]\n"
+		"ldrex	%1, [%3]\n"
 		"mov	%0, #0\n"
-		"teq	%1, %3\n"
-		"strexeq %0, %4, [%2]\n"
-		    : "=&r" (res), "=&r" (oldval)
+		"teq	%1, %4\n"
+		"strexeq %0, %5, [%3]\n"
+		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
 		    : "cc");
 	} while (res);
@@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%2]\n"
-"	bic	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	bic	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2)
+	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
 	: "r" (addr), "Ir" (mask)
 	: "cc");
 }
@@ -249,7 +249,7 @@ static inline u64 atomic64_read(atomic64_t *v)
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
 	: "=&r" (result)
-	: "r" (&v->counter)
+	: "r" (&v->counter), "Qo" (v->counter)
 	);
 
 	return result;
@@ -260,11 +260,11 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	u64 tmp;
 
 	__asm__ __volatile__("@ atomic64_set\n"
-"1:	ldrexd	%0, %H0, [%1]\n"
-"	strexd	%0, %2, %H2, [%1]\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%0, %3, %H3, [%2]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
+	: "=&r" (tmp), "=Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -275,13 +275,13 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_add\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -294,13 +294,13 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	adds	%0, %0, %3\n"
-"	adc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -315,13 +315,13 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_sub\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
@@ -334,13 +334,13 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	subs	%0, %0, %3\n"
-"	sbc	%H0, %H0, %H3\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
@@ -358,12 +358,12 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
-		"ldrexd		%1, %H1, [%2]\n"
+		"ldrexd		%1, %H1, [%3]\n"
 		"mov		%0, #0\n"
-		"teq		%1, %3\n"
-		"teqeq		%H1, %H3\n"
-		"strexdeq	%0, %4, %H4, [%2]"
-		: "=&r" (res), "=&r" (oldval)
+		"teq		%1, %4\n"
+		"teqeq		%H1, %H4\n"
+		"strexdeq	%0, %5, %H5, [%3]"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 		: "r" (&ptr->counter), "r" (old), "r" (new)
 		: "cc");
 	} while (res);
@@ -381,11 +381,11 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
-"	strexd	%1, %3, %H3, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	strexd	%1, %4, %H4, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
@@ -402,16 +402,16 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1:	ldrexd	%0, %H0, [%2]\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
 "	subs	%0, %0, #1\n"
 "	sbc	%H0, %H0, #0\n"
 "	teq	%H0, #0\n"
 "	bmi	2f\n"
-"	strexd	%1, %0, %H0, [%2]\n"
+"	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter)
 	: "cc");
 
@@ -429,18 +429,18 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	teq	%0, %4\n"
-"	teqeq	%H0, %H4\n"
+"1:	ldrexd	%0, %H0, [%4]\n"
+"	teq	%0, %5\n"
+"	teqeq	%H0, %H5\n"
 "	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%0, %0, %5\n"
-"	adc	%H0, %H0, %H5\n"
-"	strexd	%2, %0, %H0, [%3]\n"
+"	adds	%0, %0, %6\n"
+"	adc	%H0, %H0, %H6\n"
+"	strexd	%2, %0, %H0, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (u), "r" (a)
 	: "cc");
 
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index f7bd52b1c365..c1062c317103 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -19,6 +19,7 @@
 #define HWCAP_NEON	4096
 #define HWCAP_VFPv3	8192
 #define HWCAP_VFPv3D16	16384
+#define HWCAP_TLS	32768
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 /*
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 237282f7c762..2721a5814cb9 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -7,6 +7,8 @@
 #define irq_canonicalize(i)	(i)
 #endif
 
+#define NR_IRQS_LEGACY	16
+
 /*
  * Use this value to indicate lack of interrupt
  * capability
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index df15a0dc228e..8ec9ef5c3c7b 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -19,10 +19,26 @@
 
 #ifndef __ASSEMBLY__
 
-struct kimage;
-/* Provide a dummy definition to avoid build failures. */
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ *
+ * Function copies machine registers from @oldregs to @newregs. If @oldregs is
+ * %NULL then current registers are stored there.
+ */
 static inline void crash_setup_regs(struct pt_regs *newregs,
-				    struct pt_regs *oldregs) { }
+				    struct pt_regs *oldregs)
+{
+	if (oldregs) {
+		memcpy(newregs, oldregs, sizeof(*newregs));
+	} else {
+		__asm__ __volatile__ ("stmia %0, {r0 - r15}"
+				      : : "r" (&newregs->ARM_r0));
+		__asm__ __volatile__ ("mrs %0, cpsr"
+				      : "=r" (newregs->ARM_cpsr));
+	}
+}
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index c59842dc7cb8..8a0dd18ba642 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -20,6 +20,7 @@ struct machine_desc {
 	 * by assembler code in head.S, head-common.S
 	 */
 	unsigned int		nr;		/* architecture number	*/
+	unsigned int		nr_irqs;	/* number of IRQs	*/
 	unsigned int		phys_io;	/* start of physical io	*/
 	unsigned int		io_pg_offst;	/* byte offset for io
 						 * page tabe entry	*/
@@ -37,6 +38,7 @@ struct machine_desc {
 	void			(*fixup)(struct machine_desc *,
 					 struct tag *, char **,
 					 struct meminfo *);
+	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
 	void			(*init_irq)(void);
 	struct sys_timer	*timer;		/* system tick timer	*/
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 8920b2d6e3b8..ce3eee9fe26c 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -17,6 +17,7 @@ struct seq_file;
 /*
  * This is internal. Do not use it.
  */
+extern unsigned int arch_nr_irqs;
 extern void (*init_arch_irq)(void);
 extern void init_FIQ(void);
 extern int show_fiq_list(struct seq_file *, void *);
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
new file mode 100644
index 000000000000..fdbc43b2e6c0
--- /dev/null
+++ b/arch/arm/include/asm/memblock.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_ARM_MEMBLOCK_H
+#define _ASM_ARM_MEMBLOCK_H
+
+#ifdef CONFIG_MMU
+extern phys_addr_t lowmem_end_addr;
+#define MEMBLOCK_REAL_LIMIT	lowmem_end_addr
+#else
+#define MEMBLOCK_REAL_LIMIT	0
+#endif
+
+struct meminfo;
+struct machine_desc;
+
+extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
+
+#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 4312ee5e3d0b..82df0ae71bb4 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -158,7 +158,7 @@
 #endif
 
 #ifndef arch_adjust_zones
-#define arch_adjust_zones(node,size,holes) do { } while (0)
+#define arch_adjust_zones(size,holes) do { } while (0)
 #elif !defined(CONFIG_ZONE_DMA)
 #error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
 #endif
@@ -234,76 +234,11 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  * virt_to_page(k)	convert a _valid_ virtual address to struct page *
  * virt_addr_valid(k)	indicates whether a virtual address is valid
  */
-#ifndef CONFIG_DISCONTIGMEM
-
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
 
-#define PHYS_TO_NID(addr)	(0)
-
-#else /* CONFIG_DISCONTIGMEM */
-
-/*
- * This is more complex. We have a set of mem_map arrays spread
- * around in memory.
- */
-#include <linux/numa.h>
-
-#define arch_pfn_to_nid(pfn)	PFN_TO_NID(pfn)
-#define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT)
-
-#define virt_to_page(kaddr) \
-	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
-
-#define virt_addr_valid(kaddr)	(KVADDR_TO_NID(kaddr) < MAX_NUMNODES)
-
-/*
- * Common discontigmem stuff.
- * PHYS_TO_NID is used by the ARM kernel/setup.c
- */
-#define PHYS_TO_NID(addr)	PFN_TO_NID((addr) >> PAGE_SHIFT)
-
-/*
- * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
- * and returns the mem_map of that node.
- */
-#define ADDR_TO_MAPBASE(kaddr)	NODE_MEM_MAP(KVADDR_TO_NID(kaddr))
-
-/*
- * Given a page frame number, find the owning node of the memory
- * and returns the mem_map of that node.
- */
-#define PFN_TO_MAPBASE(pfn)	NODE_MEM_MAP(PFN_TO_NID(pfn))
-
-#ifdef NODE_MEM_SIZE_BITS
-#define NODE_MEM_SIZE_MASK	((1 << NODE_MEM_SIZE_BITS) - 1)
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define KVADDR_TO_NID(addr) \
-	(((unsigned long)(addr) - PAGE_OFFSET) >> NODE_MEM_SIZE_BITS)
-
-/*
- * Given a page frame number, convert it to a node id.
- */
-#define PFN_TO_NID(pfn) \
-	(((pfn) - PHYS_PFN_OFFSET) >> (NODE_MEM_SIZE_BITS - PAGE_SHIFT))
-
-/*
- * Given a kaddr, LOCAL_MEM_MAP finds the owning node of the memory
- * and returns the index corresponding to the appropriate page in the
- * node's mem_map.
- */
-#define LOCAL_MAP_NR(addr) \
-	(((unsigned long)(addr) & NODE_MEM_SIZE_MASK) >> PAGE_SHIFT)
-
-#endif /* NODE_MEM_SIZE_BITS */
-
-#endif /* !CONFIG_DISCONTIGMEM */
-
 /*
  * Optional coherency support. Currently used only by selected
  * Intel XSC3-based systems.
diff --git a/arch/arm/include/asm/mmzone.h b/arch/arm/include/asm/mmzone.h
deleted file mode 100644
index ae63a4fd28c8..000000000000
--- a/arch/arm/include/asm/mmzone.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * arch/arm/include/asm/mmzone.h
- *
- * 1999-12-29	Nicolas Pitre		Created
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_MMZONE_H
-#define __ASM_MMZONE_H
-
-/*
- * Currently defined in arch/arm/mm/discontig.c
- */
-extern pg_data_t discontig_node_data[];
-
-/*
- * Return a pointer to the node data for node n.
- */
-#define NODE_DATA(nid)		(&discontig_node_data[nid])
-
-/*
- * NODE_MEM_MAP gives the kaddr for the mem_map of the node.
- */
-#define NODE_MEM_MAP(nid)	(NODE_DATA(nid)->node_mem_map)
-
-#include <mach/memory.h>
-
-#endif
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 9dcb11e59026..c974be8913a7 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -184,6 +184,42 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define predicate(x)		((x) & 0xf0000000)
 #define PREDICATE_ALWAYS	0xe0000000
 
+/*
+ * kprobe-based event tracer support
+ */
+#include <linux/stddef.h>
+#include <linux/types.h>
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))
+
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+					       unsigned int n);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:	pt_regs from which register value is gotten
+ * @offset:	offset number of the register.
+ *
+ * regs_get_register returns the value of a register whose offset from @regs.
+ * The @offset is the offset of the register in struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+					      unsigned int offset)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return 0;
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	return regs->ARM_sp;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index f392fb4437af..f1e5a9bca249 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -201,8 +201,7 @@ static struct tagtable __tagtable_##fn __tag = { tag, fn }
 struct membank {
 	unsigned long start;
 	unsigned long size;
-	unsigned short node;
-	unsigned short highmem;
+	unsigned int highmem;
 };
 
 struct meminfo {
@@ -212,9 +211,8 @@ struct meminfo {
 
 extern struct meminfo meminfo;
 
-#define for_each_nodebank(iter,mi,no)			\
-	for (iter = 0; iter < (mi)->nr_banks; iter++)	\
-		if ((mi)->bank[iter].node == no)
+#define for_each_bank(iter,mi)				\
+	for (iter = 0; iter < (mi)->nr_banks; iter++)
 
 #define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
 #define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
new file mode 100644
index 000000000000..e71d6ff8d104
--- /dev/null
+++ b/arch/arm/include/asm/tls.h
@@ -0,0 +1,46 @@
+#ifndef __ASMARM_TLS_H
+#define __ASMARM_TLS_H
+
+#ifdef __ASSEMBLY__
+	.macro set_tls_none, tp, tmp1, tmp2
+	.endm
+
+	.macro set_tls_v6k, tp, tmp1, tmp2
+	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
+	.endm
+
+	.macro set_tls_v6, tp, tmp1, tmp2
+	ldr	\tmp1, =elf_hwcap
+	ldr	\tmp1, [\tmp1, #0]
+	mov	\tmp2, #0xffff0fff
+	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
+	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
+	.endm
+
+	.macro set_tls_software, tp, tmp1, tmp2
+	mov	\tmp1, #0xffff0fff
+	str	\tp, [\tmp1, #-15]		@ set TLS value at 0xffff0ff0
+	.endm
+#endif
+
+#ifdef CONFIG_TLS_REG_EMUL
+#define tls_emu		1
+#define has_tls_reg	1
+#define set_tls		set_tls_none
+#elif __LINUX_ARM_ARCH__ >= 7 ||				\
+	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
+#define tls_emu		0
+#define has_tls_reg	1
+#define set_tls		set_tls_v6k
+#elif __LINUX_ARM_ARCH__ == 6
+#define tls_emu		0
+#define has_tls_reg	(elf_hwcap & HWCAP_TLS)
+#define set_tls		set_tls_v6
+#else
+#define tls_emu		0
+#define has_tls_reg	0
+#define set_tls		set_tls_software
+#endif
+
+#endif	/* __ASMARM_TLS_H */
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index 422f3cc204a2..3d5fc41ae8d3 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -3,6 +3,8 @@
  *
  * Assembler-only file containing VFP macros and register definitions.
  */
+#include <asm/hwcap.h>
+
 #include "vfp.h"
 
 @ Macros to allow building with old toolkits (with no VFP support)
@@ -22,12 +24,20 @@
 	LDC	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d0-d15}
 #endif
 #ifdef CONFIG_VFPv3
+#if __LINUX_ARM_ARCH__ <= 6
+	ldr	\tmp, =elf_hwcap		    @ may not have MVFR regs
+	ldr	\tmp, [\tmp, #0]
+	tst	\tmp, #HWCAP_VFPv3D16
+	ldceq	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31}
+	addne	\base, \base, #32*4		    @ step over unused register space
+#else
 	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0
 	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	    @ A_SIMD field
 	cmp	\tmp, #2			    @ 32 x 64bit registers?
 	ldceql	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		    @ step over unused register space
 #endif
+#endif
 	.endm
 
 	@ write all the working registers out of the VFP
@@ -38,10 +48,18 @@
 	STC	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d0-d15}
 #endif
 #ifdef CONFIG_VFPv3
+#if __LINUX_ARM_ARCH__ <= 6
+	ldr	\tmp, =elf_hwcap		    @ may not have MVFR regs
+	ldr	\tmp, [\tmp, #0]
+	tst	\tmp, #HWCAP_VFPv3D16
+	stceq	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31}
+	addne	\base, \base, #32*4		    @ step over unused register space
+#else
 	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0
 	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	    @ A_SIMD field
 	cmp	\tmp, #2			    @ 32 x 64bit registers?
 	stceql	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		    @ step over unused register space
 #endif
+#endif
 	.endm