author    David Vrabel <david.vrabel@citrix.com>  2014-04-28 05:31:04 -0400
committer David Vrabel <david.vrabel@citrix.com>  2014-04-28 05:31:04 -0400
commit    67dadcb324c2fe059cb2c35f8b80df42bb23f7c4 (patch)
tree      3521a2a9e05a7de426ab50bc7685489ad6340e54 /arch/arm/include/asm
parent    eb47f71200b7d5b4c8c1f8c75675f592d855aafd (diff)
parent    e26a9e00afc482b971afcaef1db8c9034d4d6d7c (diff)
Merge commit 'e26a9e0' into stable/for-linus-3.15
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/assembler.h      |  8
-rw-r--r--  arch/arm/include/asm/atomic.h         | 44
-rw-r--r--  arch/arm/include/asm/cmpxchg.h        |  6
-rw-r--r--  arch/arm/include/asm/cputype.h        |  1
-rw-r--r--  arch/arm/include/asm/floppy.h         |  2
-rw-r--r--  arch/arm/include/asm/futex.h          |  9
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h  |  1
-rw-r--r--  arch/arm/include/asm/hwcap.h          |  3
-rw-r--r--  arch/arm/include/asm/jump_label.h     |  1
-rw-r--r--  arch/arm/include/asm/memory.h         | 41
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h |  1
-rw-r--r--  arch/arm/include/asm/pgtable.h        |  7
-rw-r--r--  arch/arm/include/asm/sync_bitops.h    |  1
-rw-r--r--  arch/arm/include/asm/system.h         |  7
-rw-r--r--  arch/arm/include/asm/uaccess.h        |  2
15 files changed, 89 insertions(+), 45 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c2285160575..380ac4f20000 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -30,8 +30,8 @@
  * Endian independent macros for shifting bytes within registers.
  */
 #ifndef __ARMEB__
-#define pull		lsr
-#define push		lsl
+#define lspull		lsr
+#define lspush		lsl
 #define get_byte_0	lsl #0
 #define get_byte_1	lsr #8
 #define get_byte_2	lsr #16
@@ -41,8 +41,8 @@
 #define put_byte_2	lsl #16
 #define put_byte_3	lsl #24
 #else
-#define pull		lsl
-#define push		lsr
+#define lspull		lsl
+#define lspush		lsr
 #define get_byte_0	lsr #24
 #define get_byte_1	lsr #16
 #define get_byte_2	lsr #8
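
The rename avoids a clash between the old pull/push macro names and the push mnemonic in unified assembler syntax; the shift semantics are unchanged. As a rough userspace C analogue of what these shifts accomplish on a little-endian core when assembling a word that straddles two aligned words (illustrative helper, not kernel code; byte_off must be 1..3 so neither shift count reaches 32):

#include <stdint.h>

/* lspull == lsr and lspush == lsl on little-endian ARM: take the top
 * bytes of the low word and the bottom bytes of the high word. */
static inline uint32_t merge_unaligned_le(uint32_t lo, uint32_t hi,
                                          unsigned byte_off /* 1..3 */)
{
        unsigned pull = 8 * byte_off;      /* lspull amount */
        unsigned push = 32 - pull;         /* lspush amount */
        return (lo >> pull) | (hi << push);
}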
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb53b069..9a92fd7864a8 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int oldval, newval;
+	unsigned long tmp;
+
+	smp_mb();
+	prefetchw(&v->counter);
+
+	__asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	beq	2f\n"
+"	add	%1, %0, %6\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (oldval != u)
+		smp_mb();
+
+	return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)
 
@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	int ret = 1;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
 "1:	ldrexd	%0, %H0, [%4]\n"
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba7efc8..abb2c3769b01 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
 #define __ASM_ARM_CMPXCHG_H
 
 #include <linux/irqflags.h>
+#include <linux/prefetch.h>
 #include <asm/barrier.h>
 
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #endif
 
 	smp_mb();
+	prefetchw((const void *)ptr);
 
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 {
 	unsigned long oldval, res;
 
+	prefetchw((const void *)ptr);
+
 	switch (size) {
 #ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 	case 1:
@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 	unsigned long long oldval;
 	unsigned long res;
 
+	prefetchw(ptr);
+
 	__asm__ __volatile__(
 "1:	ldrexd		%1, %H1, [%3]\n"
 "	teq		%1, %4\n"
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index acdde76b39bb..42f0889f0584 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -71,6 +71,7 @@
 #define ARM_CPU_PART_CORTEX_A5	0xC050
 #define ARM_CPU_PART_CORTEX_A15	0xC0F0
 #define ARM_CPU_PART_CORTEX_A7	0xC070
+#define ARM_CPU_PART_CORTEX_A12	0xC0D0
 
 #define ARM_CPU_XSCALE_ARCH_MASK	0xe000
 #define ARM_CPU_XSCALE_ARCH_V1	0x2000
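
The new constant follows the existing convention: ARM_CPU_PART_* values are bits [15:4] of the MIDR, the primary part number. An illustrative standalone check, assuming the MIDR value has already been read (hypothetical helper, not the kernel's):

#include <stdint.h>

#define ARM_CPU_PART_CORTEX_A12 0xC0D0

static inline int midr_is_cortex_a12(uint32_t midr)
{
        /* The primary part number lives in MIDR bits [15:4]. */
        return (midr & 0xfff0) == ARM_CPU_PART_CORTEX_A12;
}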
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index c9f03eccc9d8..f4882553fbb0 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -25,7 +25,7 @@
 
 #define fd_inb(port)		inb((port))
 #define fd_request_irq()	request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
-					    IRQF_DISABLED,"floppy",NULL)
+					    0,"floppy",NULL)
 #define fd_free_irq()		free_irq(IRQ_FLOPPYDISK,NULL)
 #define fd_disable_irq()	disable_irq(IRQ_FLOPPYDISK)
 #define fd_enable_irq()	enable_irq(IRQ_FLOPPYDISK)
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf597f6e6..53e69dae796f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,11 +3,6 @@
 
 #ifdef __KERNEL__
 
-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -28,6 +23,7 @@
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
 	smp_mb();						\
+	prefetchw(uaddr);					\
 	__asm__ __volatile__(					\
 	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
@@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	smp_mb();
+	/* Prefetching cannot fault */
+	prefetchw(uaddr);
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-#endif /* !(CPU_USE_DOMAINS && SMP) */
 #endif /* __KERNEL__ */
 #endif /* _ASM_ARM_FUTEX_H */
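
Two changes here: the CONFIG_CPU_USE_DOMAINS && CONFIG_SMP fallback to asm-generic/futex.h is dropped, so the ldrex/strex implementation is now used unconditionally, and the futex paths get the same prefetchw() hint as the atomics. The "Prefetching cannot fault" comment matters because uaddr is a user pointer: PLDW is only a hint and cannot raise an abort, so no exception-table fixup is needed for it. A userspace sketch of futex_atomic_cmpxchg_inatomic()'s contract (names and the trivial return value are illustrative; the kernel returns 0 or -EFAULT):

#include <stdatomic.h>
#include <stdint.h>

static int cmpxchg_inatomic(uint32_t *uval, _Atomic uint32_t *uaddr,
                            uint32_t oldval, uint32_t newval)
{
        uint32_t seen = oldval;

        __builtin_prefetch((void *)uaddr, 1);  /* write hint, cannot fault */
        atomic_compare_exchange_strong(uaddr, &seen, newval);
        *uval = seen;           /* report the value found before the swap */
        return 0;
}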
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index eef55ea9ef00..8e427c7b4425 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DEBUG_ARCH_V7_ECP14	3
 #define ARM_DEBUG_ARCH_V7_MM	4
 #define ARM_DEBUG_ARCH_V7_1	5
+#define ARM_DEBUG_ARCH_V8	6
 
 /* Breakpoint */
 #define ARM_BREAKPOINT_EXECUTE	0
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 6ff56eca3f1f..6e183fd269fb 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -9,6 +9,7 @@
  * instruction set this cpu supports.
  */
 #define ELF_HWCAP	(elf_hwcap)
-extern unsigned int elf_hwcap;
+#define ELF_HWCAP2	(elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
 #endif
 #endif
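
ELF_HWCAP2 gives 32-bit ARM a second hwcap word, exported to userspace via the AT_HWCAP2 auxiliary vector entry. A minimal consumer, assuming a libc that defines AT_HWCAP2 (kernels without it simply report 0):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        unsigned long hwcap  = getauxval(AT_HWCAP);
        unsigned long hwcap2 = getauxval(AT_HWCAP2); /* 0 if absent */

        printf("HWCAP=%#lx HWCAP2=%#lx\n", hwcap, hwcap2);
        return 0;
}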
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 863c892b4aaa..70f9b9bfb1f9 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,7 +4,6 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
-#include <asm/system.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
 
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 4afb376d9c7c..02fa2558f662 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -166,9 +166,17 @@
  * Physical vs virtual RAM address space conversion. These are
  * private definitions which should NOT be used outside memory.h
  * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
  */
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
 
 /*
  * Constants used to force the right instruction encodings and shifts
@@ -177,12 +185,17 @@
 #define __PV_BITS_31_24	0x81000000
 #define __PV_BITS_7_0	0x81
 
-extern u64 __pv_phys_offset;
+extern unsigned long __pv_phys_pfn_offset;
 extern u64 __pv_offset;
 extern void fixup_pv_table(const void *, unsigned long);
 extern const void *__pv_table_begin, *__pv_table_end;
 
-#define PHYS_OFFSET	__pv_phys_offset
+#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
 #define __pv_stub(from,to,instr,type)	\
 	__asm__("@ __pv_stub\n"	\
@@ -243,6 +256,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 #else
 
 #define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
 
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
@@ -254,18 +268,11 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 	return x - PHYS_OFFSET + PAGE_OFFSET;
 }
 
-#endif
-#endif
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view. We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+#endif
 
 /*
  * These are *only* valid on the kernel direct mapped RAM memory.
@@ -343,9 +350,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  */
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
-					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+					&& pfn_valid(virt_to_pfn(kaddr)))
 
 #endif
 
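The net effect: the start of RAM is now tracked as a PFN (__pv_phys_pfn_offset) rather than a 64-bit physical address, and virt_to_pfn() centralizes the lowmem address-to-PFN arithmetic that virt_to_page() and virt_addr_valid() previously open-coded. A self-contained worked example of that arithmetic, with illustrative values (PAGE_OFFSET 0xC0000000, 4 KiB pages, RAM starting at physical 0x80000000):

#include <assert.h>

#define PAGE_OFFSET     0xC0000000UL    /* illustrative */
#define PAGE_SHIFT      12              /* 4 KiB pages */
#define PHYS_PFN_OFFSET 0x80000UL       /* 0x80000000 >> 12 */

static unsigned long virt_to_pfn(unsigned long kaddr)
{
        return ((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET;
}

int main(void)
{
        /* One page above PAGE_OFFSET maps to the second page of RAM. */
        assert(virt_to_pfn(0xC0001000UL) == 0x80001UL);
        return 0;
}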
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index dfff709fda3c..219ac88a9542 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,6 +140,7 @@
 #define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
 #define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
 #define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
+#define L_PTE_MT_VECTORS	(_AT(pteval_t, 0x0f) << 2)	/* 1111 */
 #define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 7d59b524f2af..5478e5d6ad89 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte)		(pte_val(pte) & L_PTE_VALID)
+#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
 #define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte)	(pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+#define pte_valid_user(pte)	\
+	(pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long ext = 0;
 
-	if (addr < TASK_SIZE && pte_present_user(pteval)) {
+	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
 		__sync_icache_dcache(pteval);
 		ext |= PTE_EXT_NG;
 	}
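
set_pte_at() now keys its cache maintenance and NG handling off pte_valid_user() rather than pte_present_user(): only PTEs that are hardware-visible (L_PTE_VALID), user-accessible, and already marked young qualify, so present-but-invalid entries are skipped. A standalone illustration of the predicate; the bit positions below are placeholders for the sketch, the real values live in pgtable-2level.h and pgtable-3level.h:

#include <stdbool.h>
#include <stdint.h>

#define L_PTE_VALID (1u << 0)   /* placeholder bit position */
#define L_PTE_YOUNG (1u << 1)   /* placeholder bit position */
#define L_PTE_USER  (1u << 8)   /* placeholder bit position */

static bool pte_valid_user(uint32_t pte)
{
        return (pte & L_PTE_VALID) &&
               (pte & L_PTE_USER)  &&
               (pte & L_PTE_YOUNG);
}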
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 63479eecbf76..9732b8e11e63 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -2,7 +2,6 @@
 #define __ASM_SYNC_BITOPS_H__
 
 #include <asm/bitops.h>
-#include <asm/system.h>
 
 /* sync_bitops functions are equivalent to the SMP implementation of the
  * original functions, independently from CONFIG_SMP being defined.
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
deleted file mode 100644
index 368165e33c1c..000000000000
--- a/arch/arm/include/asm/system.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
-#include <asm/barrier.h>
-#include <asm/compiler.h>
-#include <asm/cmpxchg.h>
-#include <asm/switch_to.h>
-#include <asm/system_info.h>
-#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 72abdc541f38..12c3a5decc60 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,7 +19,7 @@
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
-#if __LINUX_ARM_ARCH__ < 6
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #include <asm-generic/uaccess-unaligned.h>
 #else
 #define __get_user_unaligned	__get_user
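
The gate for the unaligned __get_user/__put_user variants moves from an architecture-version check to the generic CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS symbol. When that symbol is off, the asm-generic fallback assembles values from byte accesses instead of issuing a potentially trapping unaligned load; a minimal userspace analogue of the idea:

#include <stdint.h>
#include <string.h>

static inline uint32_t load_u32_unaligned(const void *p)
{
        uint32_t v;

        /* memcpy lets the compiler pick byte loads on targets where an
         * unaligned word load would fault or be slow. */
        memcpy(&v, p, sizeof(v));
        return v;
}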