Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild           |  5
-rw-r--r--  arch/arm/include/asm/assembler.h      |  8
-rw-r--r--  arch/arm/include/asm/atomic.h         | 44
-rw-r--r--  arch/arm/include/asm/cmpxchg.h        |  6
-rw-r--r--  arch/arm/include/asm/cputype.h        |  1
-rw-r--r--  arch/arm/include/asm/dma-iommu.h      | 12
-rw-r--r--  arch/arm/include/asm/floppy.h         |  2
-rw-r--r--  arch/arm/include/asm/futex.h          |  9
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h  |  1
-rw-r--r--  arch/arm/include/asm/hwcap.h          |  3
-rw-r--r--  arch/arm/include/asm/jump_label.h     |  1
-rw-r--r--  arch/arm/include/asm/kprobes.h        | 17
-rw-r--r--  arch/arm/include/asm/kvm_arm.h        |  4
-rw-r--r--  arch/arm/include/asm/kvm_asm.h        |  4
-rw-r--r--  arch/arm/include/asm/kvm_host.h       |  9
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h        | 30
-rw-r--r--  arch/arm/include/asm/memory.h         | 50
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h |  1
-rw-r--r--  arch/arm/include/asm/pgtable.h        |  7
-rw-r--r--  arch/arm/include/asm/pmu.h            |  2
-rw-r--r--  arch/arm/include/asm/probes.h         | 43
-rw-r--r--  arch/arm/include/asm/ptrace.h         | 14
-rw-r--r--  arch/arm/include/asm/sync_bitops.h    |  1
-rw-r--r--  arch/arm/include/asm/system.h         |  7
-rw-r--r--  arch/arm/include/asm/thread_info.h    |  5
-rw-r--r--  arch/arm/include/asm/topology.h       |  3
-rw-r--r--  arch/arm/include/asm/uaccess.h        |  2
-rw-r--r--  arch/arm/include/asm/unistd.h         |  1
-rw-r--r--  arch/arm/include/asm/uprobes.h        | 45
-rw-r--r--  arch/arm/include/asm/xen/page.h       | 15
30 files changed, 255 insertions(+), 97 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 3278afe2c3ab..23e728ecf8ab 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -7,16 +7,19 @@ generic-y += current.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += hash.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
 generic-y += poll.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += sections.h
 generic-y += segment.h
@@ -33,5 +36,3 @@ generic-y += termios.h
 generic-y += timex.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
-generic-y += preempt.h
-generic-y += hash.h
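
Note: besides adding hash.h, mcs_spinlock.h and preempt.h, the hunks above move existing entries so the generic-y list stays alphabetically sorted. For each generic-y entry, kbuild emits a one-line wrapper header at build time; for hash.h that wrapper is roughly the following (mechanism per scripts/Makefile.asm-generic; illustration only, the file is generated and never checked in):

    /* include/generated/asm/hash.h -- emitted by kbuild at build time */
    #include <asm-generic/hash.h>
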
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c2285160575..380ac4f20000 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -30,8 +30,8 @@
  * Endian independent macros for shifting bytes within registers.
  */
 #ifndef __ARMEB__
-#define pull		lsr
-#define push		lsl
+#define lspull		lsr
+#define lspush		lsl
 #define get_byte_0	lsl #0
 #define get_byte_1	lsr #8
 #define get_byte_2	lsr #16
@@ -41,8 +41,8 @@
 #define put_byte_2	lsl #16
 #define put_byte_3	lsl #24
 #else
-#define pull		lsl
-#define push		lsr
+#define lspull		lsl
+#define lspush		lsr
 #define get_byte_0	lsr #24
 #define get_byte_1	lsr #16
 #define get_byte_2	lsr #8
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb53b069..9a92fd7864a8 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int oldval, newval;
+	unsigned long tmp;
+
+	smp_mb();
+	prefetchw(&v->counter);
+
+	__asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	beq	2f\n"
+"	add	%1, %0, %6\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (oldval != u)
+		smp_mb();
+
+	return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)
 
@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	int ret = 1;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
 "1:	ldrexd	%0, %H0, [%4]\n"
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba7efc8..abb2c3769b01 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
 #define __ASM_ARM_CMPXCHG_H
 
 #include <linux/irqflags.h>
+#include <linux/prefetch.h>
 #include <asm/barrier.h>
 
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #endif
 
 	smp_mb();
+	prefetchw((const void *)ptr);
 
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 {
 	unsigned long oldval, res;
 
+	prefetchw((const void *)ptr);
+
 	switch (size) {
 #ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 	case 1:
@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 	unsigned long long oldval;
 	unsigned long res;
 
+	prefetchw(ptr);
+
 	__asm__ __volatile__(
 "1:	ldrexd	%1, %H1, [%3]\n"
 "	teq	%1, %4\n"
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index acdde76b39bb..42f0889f0584 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -71,6 +71,7 @@
 #define ARM_CPU_PART_CORTEX_A5	0xC050
 #define ARM_CPU_PART_CORTEX_A15	0xC0F0
 #define ARM_CPU_PART_CORTEX_A7	0xC070
+#define ARM_CPU_PART_CORTEX_A12	0xC0D0
 
 #define ARM_CPU_XSCALE_ARCH_MASK	0xe000
 #define ARM_CPU_XSCALE_ARCH_V1		0x2000
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index a8c56acc8c98..eec0a12c5c1d 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -13,9 +13,12 @@ struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain	*domain;
 
-	void			*bitmap;
-	size_t			bits;
-	unsigned int		order;
+	unsigned long		**bitmaps;	/* array of bitmaps */
+	unsigned int		nr_bitmaps;	/* nr of elements in array */
+	unsigned int		extensions;
+	size_t			bitmap_size;	/* size of a single bitmap */
+	size_t			bits;		/* per bitmap */
+	unsigned int		size;		/* per bitmap */
 	dma_addr_t		base;
 
 	spinlock_t		lock;
@@ -23,8 +26,7 @@ struct dma_iommu_mapping {
 };
 
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
-			 int order);
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
 
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
 
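
Note: the mapping's allocation bitmap becomes an array (nr_bitmaps/extensions) that is grown on demand, so callers no longer pre-size it through an order argument. A hypothetical probe path against the new prototype (device, base address and size are assumed values):

    static int sketch_attach_iommu(struct device *dev)
    {
    	struct dma_iommu_mapping *mapping;

    	/* note: no trailing 'order' argument any more */
    	mapping = arm_iommu_create_mapping(&platform_bus_type,
    					   0x80000000, SZ_1G);
    	if (IS_ERR(mapping))
    		return PTR_ERR(mapping);

    	return arm_iommu_attach_device(dev, mapping);
    }
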
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index c9f03eccc9d8..f4882553fbb0 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -25,7 +25,7 @@
 
 #define fd_inb(port)		inb((port))
 #define fd_request_irq()	request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
-					    IRQF_DISABLED,"floppy",NULL)
+					    0,"floppy",NULL)
 #define fd_free_irq()		free_irq(IRQ_FLOPPYDISK,NULL)
 #define fd_disable_irq()	disable_irq(IRQ_FLOPPYDISK)
 #define fd_enable_irq()		enable_irq(IRQ_FLOPPYDISK)
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf597f6e6..53e69dae796f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,11 +3,6 @@
 
 #ifdef __KERNEL__
 
-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -28,6 +23,7 @@
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
 	smp_mb();							\
+	prefetchw(uaddr);						\
 	__asm__ __volatile__(						\
 	"1:	ldrex	%1, [%3]\n"					\
 	"	" insn "\n"						\
@@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	smp_mb();
+	/* Prefetching cannot fault */
+	prefetchw(uaddr);
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-#endif /* !(CPU_USE_DOMAINS && SMP) */
 #endif /* __KERNEL__ */
 #endif /* _ASM_ARM_FUTEX_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index eef55ea9ef00..8e427c7b4425 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DEBUG_ARCH_V7_ECP14	3
 #define ARM_DEBUG_ARCH_V7_MM	4
 #define ARM_DEBUG_ARCH_V7_1	5
+#define ARM_DEBUG_ARCH_V8	6
 
 /* Breakpoint */
 #define ARM_BREAKPOINT_EXECUTE	0
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 6ff56eca3f1f..6e183fd269fb 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -9,6 +9,7 @@
  * instruction set this cpu supports.
  */
 #define ELF_HWCAP	(elf_hwcap)
-extern unsigned int elf_hwcap;
+#define ELF_HWCAP2	(elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
 #endif
 #endif
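
Note: the second hwcap word is handed to userspace through the AT_HWCAP2 auxiliary-vector entry; a program can read it as below (userspace illustration, requires a glibc recent enough to define AT_HWCAP2):

    #include <stdio.h>
    #include <sys/auxv.h>	/* getauxval(), AT_HWCAP2 */

    int main(void)
    {
    	/* returns 0 on kernels that do not provide a second hwcap word */
    	unsigned long hwcap2 = getauxval(AT_HWCAP2);

    	printf("HWCAP2 = %#lx\n", hwcap2);
    	return 0;
    }
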
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 863c892b4aaa..70f9b9bfb1f9 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,7 +4,6 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
-#include <asm/system.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
 
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index f82ec22eeb11..49fa0dfaad33 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -18,7 +18,7 @@
 
 #include <linux/types.h>
 #include <linux/ptrace.h>
-#include <linux/percpu.h>
+#include <linux/notifier.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE			2
@@ -28,21 +28,10 @@
 #define kretprobe_blacklist_size	0
 
 typedef u32 kprobe_opcode_t;
-
 struct kprobe;
-typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
-typedef unsigned long (kprobe_check_cc)(unsigned long);
-typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *);
-typedef void (kprobe_insn_fn_t)(void);
+#include <asm/probes.h>
 
-/* Architecture specific copy of original instruction. */
-struct arch_specific_insn {
-	kprobe_opcode_t		*insn;
-	kprobe_insn_handler_t	*insn_handler;
-	kprobe_check_cc		*insn_check_cc;
-	kprobe_insn_singlestep_t *insn_singlestep;
-	kprobe_insn_fn_t	*insn_fn;
-};
+#define	arch_specific_insn	arch_probes_insn
 
 struct prev_kprobe {
 	struct kprobe *kp;
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 1d3153c7eb41..816db0bf2dd8 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -55,6 +55,7 @@
  * The bits we set in HCR:
  * TAC:		Trap ACTLR
  * TSC:		Trap SMC
+ * TVM:		Trap VM ops (until MMU and caches are on)
  * TSW:		Trap cache operations by set/way
  * TWI:		Trap WFI
  * TWE:		Trap WFE
@@ -68,8 +69,7 @@
  */
 #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
 			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
-			HCR_TWE | HCR_SWIO | HCR_TIDCP)
-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+			HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
 
 /* System Control Register (SCTLR) bits */
 #define SCTLR_TE	(1 << 30)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 661da11f76f4..53b3c4a50d5c 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -48,7 +48,9 @@
 #define c13_TID_URO	26	/* Thread ID, User R/O */
 #define c13_TID_PRIV	27	/* Thread ID, Privileged */
 #define c14_CNTKCTL	28	/* Timer Control Register (PL1) */
-#define NR_CP15_REGS	29	/* Number of regs (incl. invalid) */
+#define c10_AMAIR0	29	/* Auxilary Memory Attribute Indirection Reg0 */
+#define c10_AMAIR1	30	/* Auxilary Memory Attribute Indirection Reg1 */
+#define NR_CP15_REGS	31	/* Number of regs (incl. invalid) */
 
 #define ARM_EXCEPTION_RESET	0
 #define ARM_EXCEPTION_UNDEFINED	1
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 098f7dd6d564..09af14999c9b 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -101,6 +101,12 @@ struct kvm_vcpu_arch {
 	/* The CPU type we expose to the VM */
 	u32 midr;
 
+	/* HYP trapping configuration */
+	u32 hcr;
+
+	/* Interrupt related fields */
+	u32 irq_lines;		/* IRQ and FIQ levels */
+
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
@@ -128,9 +134,6 @@ struct kvm_vcpu_arch {
 	/* IO related fields */
 	struct kvm_decode mmio_decode;
 
-	/* Interrupt related fields */
-	u32 irq_lines;		/* IRQ and FIQ levels */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 2d122adcdb22..5c7aa3c1519f 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -114,11 +114,34 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 	pmd_val(*pmd) |= L_PMD_S2_RDWR;
 }
 
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+#define kvm_pgd_addr_end(addr, end)					\
+({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+
+#define kvm_pud_addr_end(addr,end)	(end)
+
+#define kvm_pmd_addr_end(addr, end)					\
+({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
-					      unsigned long size)
+#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
+	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+					     unsigned long size)
+{
+	if (!vcpu_has_cache_enabled(vcpu))
+		kvm_flush_dcache_to_poc((void *)hva, size);
+
 	/*
 	 * If we are going to insert an instruction page and the icache is
 	 * either VIPT or PIPT, there is a potential problem where the host
@@ -139,9 +162,10 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
 	}
 }
 
-#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
 #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
 
+void stage2_flush_vm(struct kvm *kvm);
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
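
Note: the open-coded addr_end macros take u64 values because a guest IPA can exceed the 32-bit unsigned long used by the generic pgd_addr_end(), and the '- 1' comparison keeps the result correct when the computed boundary wraps to zero at the top of the address space. A standalone illustration (SKETCH_/sketch_ names hypothetical):

    #include <assert.h>
    #include <stdint.h>

    #define SKETCH_PGDIR_SIZE	(1ULL << 30)
    #define SKETCH_PGDIR_MASK	(~(SKETCH_PGDIR_SIZE - 1))

    static uint64_t sketch_pgd_addr_end(uint64_t addr, uint64_t end)
    {
    	uint64_t boundary = (addr + SKETCH_PGDIR_SIZE) & SKETCH_PGDIR_MASK;

    	/* 'boundary - 1 < end - 1' still holds if boundary wrapped to 0 */
    	return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
    	/* ordinary case: stop at the next 1 GiB boundary */
    	assert(sketch_pgd_addr_end(0x1000, 4 * SKETCH_PGDIR_SIZE) ==
    	       SKETCH_PGDIR_SIZE);
    	/* addr in the last block: the boundary wraps to 0, 'end' must win */
    	assert(sketch_pgd_addr_end(~0ULL - 0x1000, ~0ULL) == ~0ULL);
    	return 0;
    }
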
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 8756e4bcdba0..02fa2558f662 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -30,14 +30,15 @@
  */
 #define UL(x) _AC(x, UL)
 
+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
+
 #ifdef CONFIG_MMU
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
-#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
@@ -104,10 +105,6 @@
 #define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET		PLAT_PHYS_OFFSET
-#endif
-
 /*
  * The module can be at any place in ram in nommu mode.
  */
@@ -169,9 +166,17 @@
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
  */
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
 
 /*
  * Constants used to force the right instruction encodings and shifts
@@ -180,12 +185,17 @@
 #define __PV_BITS_31_24	0x81000000
 #define __PV_BITS_7_0	0x81
 
-extern u64 __pv_phys_offset;
+extern unsigned long __pv_phys_pfn_offset;
 extern u64 __pv_offset;
 extern void fixup_pv_table(const void *, unsigned long);
 extern const void *__pv_table_begin, *__pv_table_end;
 
-#define PHYS_OFFSET __pv_phys_offset
+#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
 #define __pv_stub(from,to,instr,type)			\
 	__asm__("@ __pv_stub\n"				\
@@ -246,6 +256,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 #else
 
 #define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
 
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
@@ -257,18 +268,11 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 	return x - PHYS_OFFSET + PAGE_OFFSET;
 }
 
-#endif
-#endif
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view.  We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+#endif
 
 /*
  * These are *only* valid on the kernel direct mapped RAM memory.
@@ -346,9 +350,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  */
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
-					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+					&& pfn_valid(virt_to_pfn(kaddr)))
 
 #endif
 
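
Note: with PHYS_OFFSET now derived from a page frame number (__pv_phys_pfn_offset), virt_to_pfn() becomes the one place a lowmem virtual address turns into a PFN, and virt_to_page()/virt_addr_valid() are rewritten on top of it. A quick sanity check of the arithmetic under assumed values (PAGE_OFFSET 0xC0000000, PHYS_OFFSET 0x80000000, 4 KiB pages; SK_ names hypothetical):

    #include <assert.h>

    #define SK_PAGE_SHIFT		12
    #define SK_PAGE_OFFSET		0xC0000000UL
    #define SK_PHYS_PFN_OFFSET	(0x80000000UL >> SK_PAGE_SHIFT)	/* 0x80000 */

    #define sk_virt_to_pfn(kaddr) \
    	((((unsigned long)(kaddr) - SK_PAGE_OFFSET) >> SK_PAGE_SHIFT) + \
    	 SK_PHYS_PFN_OFFSET)

    int main(void)
    {
    	/* 4 pages into lowmem: physical 0x80004000, i.e. pfn 0x80004 */
    	assert(sk_virt_to_pfn(0xC0004000UL) == 0x80004UL);
    	return 0;
    }
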
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index dfff709fda3c..219ac88a9542 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,6 +140,7 @@
 #define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
 #define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
 #define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
+#define L_PTE_MT_VECTORS	(_AT(pteval_t, 0x0f) << 2)	/* 1111 */
 #define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 7d59b524f2af..5478e5d6ad89 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte)		(pte_val(pte) & L_PTE_VALID)
+#define	pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
 #define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+#define pte_valid_user(pte)	\
+	(pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long ext = 0;
 
-	if (addr < TASK_SIZE && pte_present_user(pteval)) {
+	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
 		__sync_icache_dcache(pteval);
 		ext |= PTE_EXT_NG;
 	}
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index f24edad26c70..ae1919be8f98 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -71,6 +71,8 @@ struct arm_pmu {
 	void		(*disable)(struct perf_event *event);
 	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
 					 struct perf_event *event);
+	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
+					   struct perf_event *event);
 	int		(*set_event_filter)(struct hw_perf_event *evt,
 					    struct perf_event_attr *attr);
 	u32		(*read_counter)(struct perf_event *event);
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
new file mode 100644
index 000000000000..806cfe622a9e
--- /dev/null
+++ b/arch/arm/include/asm/probes.h
@@ -0,0 +1,43 @@
+/*
+ * arch/arm/include/asm/probes.h
+ *
+ * Original contents copied from arch/arm/include/asm/kprobes.h
+ * which contains the following notice...
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ASM_PROBES_H
+#define _ASM_PROBES_H
+
+typedef u32 probes_opcode_t;
+
+struct arch_probes_insn;
+typedef void (probes_insn_handler_t)(probes_opcode_t,
+				     struct arch_probes_insn *,
+				     struct pt_regs *);
+typedef unsigned long (probes_check_cc)(unsigned long);
+typedef void (probes_insn_singlestep_t)(probes_opcode_t,
+					struct arch_probes_insn *,
+					struct pt_regs *);
+typedef void (probes_insn_fn_t)(void);
+
+/* Architecture specific copy of original instruction. */
+struct arch_probes_insn {
+	probes_opcode_t			*insn;
+	probes_insn_handler_t		*insn_handler;
+	probes_check_cc			*insn_check_cc;
+	probes_insn_singlestep_t	*insn_singlestep;
+	probes_insn_fn_t		*insn_fn;
+};
+
+#endif
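
Note: arch_probes_insn factors the decoded-instruction record out of kprobes so that uprobes (added below) can share it. A hypothetical consumer drives the function pointers roughly like this (the real callers live under arch/arm/kernel/):

    /* hypothetical driver for one decoded instruction record */
    static void sketch_probe_step(probes_opcode_t opcode,
    			      struct arch_probes_insn *asi,
    			      struct pt_regs *regs)
    {
    	/* skip instructions whose condition code fails against the CPSR */
    	if (!asi->insn_check_cc(regs->ARM_cpsr))
    		return;

    	asi->insn_singlestep(opcode, asi, regs);
    }
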
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 04c99f36ff7f..c877654fe3bf 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -27,9 +27,13 @@ struct pt_regs {
 #define thumb_mode(regs) (0)
 #endif
 
+#ifndef CONFIG_CPU_V7M
 #define isa_mode(regs) \
-	((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \
-	 (((regs)->ARM_cpsr & PSR_T_BIT) >> 5))
+	((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
+	 (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
+#else
+#define isa_mode(regs) 1 /* Thumb */
+#endif
 
 #define processor_mode(regs) \
 	((regs)->ARM_cpsr & MODE_MASK)
@@ -80,6 +84,12 @@ static inline long regs_return_value(struct pt_regs *regs)
 
 #define instruction_pointer(regs)	(regs)->ARM_pc
 
+static inline void instruction_pointer_set(struct pt_regs *regs,
+					   unsigned long val)
+{
+	instruction_pointer(regs) = val;
+}
+
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
 #else
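
Note: the isa_mode() rewrite derives the shift counts from the bit positions themselves. With PSR_J_BIT = 1 << 24 and PSR_T_BIT = 1 << 5, J lands in bit 1 and T in bit 0, so the result is 0 = ARM, 1 = Thumb, 2 = Jazelle, 3 = ThumbEE (and a V7-M core is always Thumb, hence the constant 1). Spelled out with the constants inlined (illustration only, SK_ names hypothetical):

    #define SK_PSR_J_BIT	(1UL << 24)
    #define SK_PSR_T_BIT	(1UL << 5)

    static unsigned long sketch_isa_mode(unsigned long cpsr)
    {
    	return ((cpsr & SK_PSR_J_BIT) >> (24 - 1)) |	/* J -> bit 1 */
    	       ((cpsr & SK_PSR_T_BIT) >> 5);		/* T -> bit 0 */
    }
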
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 63479eecbf76..9732b8e11e63 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -2,7 +2,6 @@
 #define __ASM_SYNC_BITOPS_H__
 
 #include <asm/bitops.h>
-#include <asm/system.h>
 
 /* sync_bitops functions are equivalent to the SMP implementation of the
  * original functions, independently from CONFIG_SMP being defined.
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
deleted file mode 100644
index 368165e33c1c..000000000000
--- a/arch/arm/include/asm/system.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
-#include <asm/barrier.h>
-#include <asm/compiler.h>
-#include <asm/cmpxchg.h>
-#include <asm/switch_to.h>
-#include <asm/system_info.h>
-#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 71a06b293489..f989d7c22dc5 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -153,6 +153,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
+#define TIF_UPROBE		7
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
@@ -165,6 +166,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
@@ -178,7 +180,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 /*
  * Change these and you break ASM code in entry-common.S
  */
-#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
+#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84adcd2..2fe85fff5cca 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -20,9 +20,6 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
 #define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
 
-#define mc_capable()	(cpu_topology[0].socket_id != -1)
-#define smt_capable()	(cpu_topology[0].thread_id != -1)
-
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 72abdc541f38..12c3a5decc60 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,7 +19,7 @@
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
-#if __LINUX_ARM_ARCH__ < 6
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #include <asm-generic/uaccess-unaligned.h>
 #else
 #define __get_user_unaligned __get_user
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index acabef1a75df..43876245fc57 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -48,6 +48,5 @@
  */
 #define __IGNORE_fadvise64_64
 #define __IGNORE_migrate_pages
-#define __IGNORE_kcmp
 
 #endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/uprobes.h b/arch/arm/include/asm/uprobes.h
new file mode 100644
index 000000000000..9472c20b7d49
--- /dev/null
+++ b/arch/arm/include/asm/uprobes.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 Rabin Vincent <rabin@rab.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/opcodes.h>
+
+typedef u32 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES		4
+#define UPROBE_XOL_SLOT_BYTES	64
+
+#define UPROBE_SWBP_ARM_INSN	0xe7f001f9
+#define UPROBE_SS_ARM_INSN	0xe7f001fa
+#define UPROBE_SWBP_INSN	__opcode_to_mem_arm(UPROBE_SWBP_ARM_INSN)
+#define UPROBE_SWBP_INSN_SIZE	4
+
+struct arch_uprobe_task {
+	u32 backup;
+	unsigned long	saved_trap_no;
+};
+
+struct arch_uprobe {
+	u8 insn[MAX_UINSN_BYTES];
+	unsigned long ixol[2];
+	uprobe_opcode_t bpinsn;
+	bool simulate;
+	u32 pcreg;
+	void (*prehandler)(struct arch_uprobe *auprobe,
+			   struct arch_uprobe_task *autask,
+			   struct pt_regs *regs);
+	void (*posthandler)(struct arch_uprobe *auprobe,
+			    struct arch_uprobe_task *autask,
+			    struct pt_regs *regs);
+	struct arch_probes_insn asi;
+};
+
+#endif
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index e0965abacb7d..cf4f3e867395 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -97,16 +97,13 @@ static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
 	return NULL;
 }
 
-static inline int m2p_add_override(unsigned long mfn, struct page *page,
-		struct gnttab_map_grant_ref *kmap_op)
-{
-	return 0;
-}
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+				   struct gnttab_map_grant_ref *kmap_ops,
+				   struct page **pages, unsigned int count);
 
-static inline int m2p_remove_override(struct page *page, bool clear_pte)
-{
-	return 0;
-}
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+				     struct gnttab_map_grant_ref *kmap_ops,
+				     struct page **pages, unsigned int count);
 
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,