Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/arch_timer.h      |  19
-rw-r--r--  arch/arm/include/asm/cacheflush.h      |   6
-rw-r--r--  arch/arm/include/asm/cmpxchg.h         |  73
-rw-r--r--  arch/arm/include/asm/glue-df.h         |   8
-rw-r--r--  arch/arm/include/asm/glue-proc.h       |  18
-rw-r--r--  arch/arm/include/asm/hardware/it8152.h |   2
-rw-r--r--  arch/arm/include/asm/mach/pci.h        |  17
-rw-r--r--  arch/arm/include/asm/mach/time.h       |   5
-rw-r--r--  arch/arm/include/asm/mmu.h             |   7
-rw-r--r--  arch/arm/include/asm/mmu_context.h     | 104
-rw-r--r--  arch/arm/include/asm/page.h            |   9
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h  |   2
-rw-r--r--  arch/arm/include/asm/ptrace.h          |   5
-rw-r--r--  arch/arm/include/asm/syscall.h         |  93
-rw-r--r--  arch/arm/include/asm/thread_info.h     |   1
-rw-r--r--  arch/arm/include/asm/tlbflush.h        |  21
16 files changed, 227 insertions(+), 163 deletions(-)
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
new file mode 100644
index 000000000000..ed2e95d46e29
--- /dev/null
+++ b/arch/arm/include/asm/arch_timer.h
@@ -0,0 +1,19 @@
+#ifndef __ASMARM_ARCH_TIMER_H
+#define __ASMARM_ARCH_TIMER_H
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+int arch_timer_of_register(void);
+int arch_timer_sched_clock_init(void);
+#else
+static inline int arch_timer_of_register(void)
+{
+	return -ENXIO;
+}
+
+static inline int arch_timer_sched_clock_init(void)
+{
+	return -ENXIO;
+}
+#endif
+
+#endif
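The !CONFIG_ARM_ARCH_TIMER stubs return -ENXIO so generic init code can probe for the architected timer unconditionally. A minimal sketch of a board timer init built on that contract (board_timer_init() and board_legacy_timer_init() are invented names, not part of this patch):

	/*
	 * Sketch only: prefer the ARMv7 architected timer, fall back to a
	 * board-specific timer when it is absent (-ENXIO).
	 */
	static void __init board_timer_init(void)
	{
		if (arch_timer_of_register() == 0 &&
		    arch_timer_sched_clock_init() == 0)
			return;

		board_legacy_timer_init();	/* hypothetical fallback */
	}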
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c72682..004c1bc95d2b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -101,7 +101,7 @@ struct cpu_cache_fns {
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
-	void (*coherent_user_range)(unsigned long, unsigned long);
+	int (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_map_area)(const void *, size_t, int);
@@ -142,7 +142,7 @@ extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
-extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
@@ -249,7 +249,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
 	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
 
 /*
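With __cpuc_coherent_user_range() returning int, a fault taken while maintaining caches on a user address can be reported instead of silently ignored; flush_cache_user_range() also loses its unused vma parameter. A sketch of the sys_cacheflush-style caller this enables (the helper name and the -EINVAL check are illustrative):

	/* Sketch: flush a user range and propagate any fault reported
	 * by the cache maintenance code. */
	static int do_cache_op(unsigned long start, unsigned long end)
	{
		if (end < start)
			return -EINVAL;

		/* returns 0 on success, an error if the range faulted */
		return flush_cache_user_range(start, end);
	}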
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index d41d7cbf0ada..7eb18c1d8d6c 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -229,66 +229,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 			(unsigned long)(n),		\
 			sizeof(*(ptr))))
 
-#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
-
-/*
- * Note : ARMv7-M (currently unsupported by Linux) does not support
- * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
- * not be allowed to use __cmpxchg64.
- */
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register unsigned long long oldval asm("r0");
-	register unsigned long long __old asm("r2") = old;
-	register unsigned long long __new asm("r4") = new;
-	unsigned long res;
-
-	do {
-		asm volatile(
-		"	@ __cmpxchg8\n"
-		"	ldrexd	%1, %H1, [%2]\n"
-		"	mov	%0, #0\n"
-		"	teq	%1, %3\n"
-		"	teqeq	%H1, %H3\n"
-		"	strexdeq %0, %4, %H4, [%2]\n"
-		: "=&r" (res), "=&r" (oldval)
-		: "r" (ptr), "Ir" (__old), "r" (__new)
-		: "memory", "cc");
-	} while (res);
-
-	return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					    (unsigned long long)(o),	\
-					    (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr,o,n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
-					 (unsigned long long)(o),	\
-					 (unsigned long long)(n)))
-
-#else /* min ARCH = ARMv6 */
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif
+#define cmpxchg64(ptr, o, n)						\
+	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
+						atomic64_t,		\
+						counter),		\
+					      (unsigned long)(o),	\
+					      (unsigned long)(n)))
+
+#define cmpxchg64_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
+					     local64_t,			\
+					     a),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n)))
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
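cmpxchg64() no longer carries its own ldrexd/strexd loop; it is routed through atomic64_cmpxchg(), which already provides the 64-bit exclusives (with generic fallbacks) and the required barriers. Call sites keep the same shape; a small usage sketch (the generation counter is invented for illustration):

	/* Sketch: atomically advance a 64-bit generation count. */
	static u64 bump_generation(u64 *gen)
	{
		u64 old;

		do {
			old = *gen;
		} while (cmpxchg64(gen, old, old + 1) != old);

		return old + 1;
	}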
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 354d571e8bcc..8cacbcda76da 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -31,14 +31,6 @@
 #undef CPU_DABORT_HANDLER
 #undef MULTI_DABORT
 
-#if defined(CONFIG_CPU_ARM610)
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER cpu_arm6_data_abort
-# endif
-#endif
-
 #if defined(CONFIG_CPU_ARM710)
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index e2be7f142668..ac1dd54724b6 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -23,15 +23,6 @@
  * CPU_NAME - the prefix for CPU related functions
  */
 
-#ifdef CONFIG_CPU_ARM610
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm6
-# endif
-#endif
-
 #ifdef CONFIG_CPU_ARM7TDMI
 # ifdef CPU_NAME
 #  undef  MULTI_CPU
@@ -41,15 +32,6 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ARM710
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm7
-# endif
-#endif
-
 #ifdef CONFIG_CPU_ARM720T
 # ifdef CPU_NAME
 #  undef  MULTI_CPU
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 73f84fa4f366..d36a73d7c0e8 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -110,6 +110,6 @@ extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
 extern void it8152_init_irq(void);
 extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
 extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
-extern struct pci_bus *it8152_pci_scan_bus(int nr, struct pci_sys_data *sys);
+extern struct pci_ops it8152_ops;
 
 #endif /* __ASM_HARDWARE_IT8152_H */
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index d943b7d20f11..26c511fddf8f 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -12,13 +12,14 @@
 #define __ASM_MACH_PCI_H
 
 struct pci_sys_data;
+struct pci_ops;
 struct pci_bus;
 
 struct hw_pci {
 #ifdef CONFIG_PCI_DOMAINS
 	int		domain;
 #endif
-	struct list_head buses;
+	struct pci_ops	*ops;
 	int		nr_controllers;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
@@ -45,16 +46,10 @@ struct pci_sys_data {
 	u8		(*swizzle)(struct pci_dev *, u8 *);
 					/* IRQ mapping				*/
 	int		(*map_irq)(const struct pci_dev *, u8, u8);
-	struct hw_pci	*hw;
 	void		*private_data;	/* platform controller private data	*/
 };
 
 /*
- * This is the standard PCI-PCI bridge swizzling algorithm.
- */
-#define pci_std_swizzle	pci_common_swizzle
-
-/*
  * Call this with your hw_pci struct to initialise the PCI system.
  */
 void pci_common_init(struct hw_pci *);
@@ -62,22 +57,22 @@ void pci_common_init(struct hw_pci *);
 /*
  * PCI controllers
  */
+extern struct pci_ops iop3xx_ops;
 extern int iop3xx_pci_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *);
 extern void iop3xx_pci_preinit(void);
 extern void iop3xx_pci_preinit_cond(void);
 
+extern struct pci_ops dc21285_ops;
 extern int dc21285_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *);
 extern void dc21285_preinit(void);
 extern void dc21285_postinit(void);
 
+extern struct pci_ops via82c505_ops;
 extern int via82c505_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *via82c505_scan_bus(int nr, struct pci_sys_data *);
 extern void via82c505_init(void *sysdata);
 
+extern struct pci_ops pci_v3_ops;
 extern int pci_v3_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
 extern void pci_v3_preinit(void);
 extern void pci_v3_postinit(void);
 
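Registration shifts from per-controller scan callbacks to a pci_ops pointer on hw_pci: board code fills in .ops and lets the core scan the bus, instead of exporting a *_scan_bus() wrapper. A sketch of what a registration might now look like (the board_* names are invented; that .scan may then be left NULL is this change's apparent intent but is assumed here):

	/* Sketch: board PCI registration using the new ops field. */
	static struct hw_pci board_pci __initdata = {
		.ops		= &it8152_ops,	/* was: .scan = it8152_pci_scan_bus */
		.nr_controllers	= 1,
		.setup		= it8152_pci_setup,
		.map_irq	= it8152_pci_map_irq,
	};

	static int __init board_pci_init(void)
	{
		pci_common_init(&board_pci);
		return 0;
	}
	subsys_initcall(board_pci_init);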
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index f73c908b7fa0..6ca945f534ab 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -42,4 +42,9 @@ struct sys_timer {
42 42
43extern void timer_tick(void); 43extern void timer_tick(void);
44 44
45struct timespec;
46typedef void (*clock_access_fn)(struct timespec *);
47extern int register_persistent_clock(clock_access_fn read_boot,
48 clock_access_fn read_persistent);
49
45#endif 50#endif
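register_persistent_clock() gives platforms a single hook-up point for the timekeeping core's read_boot_clock()/read_persistent_clock() paths, e.g. to keep wall-clock time across suspend from a battery-backed RTC. A sketch of a platform hookup (the board RTC reader is hypothetical, and passing NULL for the unused boot-clock hook is an assumption):

	/* Sketch: feed a board RTC into suspend/resume timekeeping. */
	static void board_read_persistent(struct timespec *ts)
	{
		board_rtc_read_time(ts);	/* hypothetical helper */
	}

	static int __init board_time_init(void)
	{
		return register_persistent_clock(NULL, board_read_persistent);
	}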
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b8e580a297e4..14965658a923 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,11 +34,4 @@ typedef struct {
 
 #endif
 
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c..0306bc642c0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,45 +43,104 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
-#ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-	/*
-	 * This code is executed with interrupts enabled. Therefore,
-	 * mm->context.id cannot be updated to the latest ASID version
-	 * on a different CPU (and condition below not triggered)
-	 * without first getting an IPI to reset the context. The
-	 * alternative is to take a read_lock on mm->context.id_lock
-	 * (after changing its type to rwlock_t).
-	 */
-	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		__new_context(mm);
+	unsigned long flags;
+
+	__new_context(mm);
+
+	local_irq_save(flags);
+	cpu_switch_mm(mm->pgd, mm);
+	local_irq_restore(flags);
+}
 
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		/*
+		 * The ASID is from the current generation, just switch to the
+		 * new pgd. This condition is only true for calls from
+		 * context_switch() and interrupts are already disabled.
+		 */
+		cpu_switch_mm(mm->pgd, mm);
+	else if (irqs_disabled())
+		/*
+		 * Defer the new ASID allocation until after the context
+		 * switch critical region since __new_context() cannot be
+		 * called with interrupts disabled (it sends IPIs).
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		/*
+		 * That is a direct call to switch_mm() or activate_mm() with
+		 * interrupts enabled and a new context.
+		 */
+		switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
-#else
-
-static inline void check_context(struct mm_struct *mm)
-{
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+		switch_new_context(current->mm);
+}
+
+#else	/* !CONFIG_CPU_HAS_ASID */
+
 #ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
-#endif
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		cpu_switch_mm(mm->pgd, mm);
 }
 
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+		cpu_switch_mm(mm->pgd, mm);
+	}
+}
+
+#endif	/* CONFIG_MMU */
+
 #define init_new_context(tsk,mm)	0
 
-#endif
+#endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
 
@@ -119,12 +178,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-#ifdef CONFIG_SMP
-		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
-		*crt_mm = next;
-#endif
-		check_context(next);
-		cpu_switch_mm(next->pgd, next);
+		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
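Taken together with the removal of __ARCH_WANT_INTERRUPTS_ON_CTXSW above, the expensive part of a context switch is now deferred rather than run with interrupts enabled: if switch_mm() is entered with interrupts disabled and the switch needs __new_context() (which sends IPIs) or a VIVT cache flush, it only sets TIF_SWITCH_MM, and the scheduler replays the switch once its locks are dropped. A condensed sketch of the protocol (the scheduler side is paraphrased, not this patch's code):

	/*
	 * context_switch()                        [irqs off]
	 *   switch_mm()
	 *     check_and_switch_context()
	 *       cheap case:     cpu_switch_mm() immediately
	 *       expensive case: set_ti_thread_flag(..., TIF_SWITCH_MM)
	 *
	 * finish_task_switch()                    [locks dropped]
	 *   finish_arch_post_lock_switch()
	 *     if TIF_SWITCH_MM was set:
	 *       switch_new_context() / cpu_switch_mm()
	 */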
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 5838361c48b3..ecf901902e44 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -34,7 +34,6 @@
  * processor(s) we're building for.
  *
  * We have the following to choose from:
- *  v3		- ARMv3
  *  v4wt	- ARMv4 with writethrough cache, without minicache
  *  v4wb	- ARMv4 with writeback cache, without minicache
  *  v4_mc	- ARMv4 with minicache
@@ -44,14 +43,6 @@
 #undef _USER
 #undef MULTI_USER
 
-#ifdef CONFIG_CPU_COPY_V3
-# ifdef _USER
-#  define MULTI_USER 1
-# else
-#  define _USER v3
-# endif
-#endif
-
 #ifdef CONFIG_CPU_COPY_V4WT
 # ifdef _USER
 #  define MULTI_USER 1
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 759af70f9a0a..b24903549d1c 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -69,8 +69,6 @@
  */
 #define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
-#define L_PTE_BUFFERABLE	(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
-#define L_PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
 #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
 #define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 451808ba1211..355ece523f41 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -249,6 +249,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 	return regs->ARM_sp;
 }
 
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+	return regs->ARM_sp;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
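user_stack_pointer() is the accessor that generic code (the regset/tracehook layers, for instance) expects each architecture to provide; on ARM the user stack pointer is simply the saved ARM_sp. A trivial illustrative use (the helper itself is invented):

	/* Sketch: report a traced task's user stack pointer. */
	static void show_user_sp(struct task_struct *tsk)
	{
		struct pt_regs *regs = task_pt_regs(tsk);

		pr_info("%s[%d] user sp = %08lx\n",
			tsk->comm, task_pid_nr(tsk), user_stack_pointer(regs));
	}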
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
new file mode 100644
index 000000000000..c334a23ddf75
--- /dev/null
+++ b/arch/arm/include/asm/syscall.h
@@ -0,0 +1,93 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_ARM_SYSCALL_H
+#define _ASM_ARM_SYSCALL_H
+
+#include <linux/err.h>
+
+extern const unsigned long sys_call_table[];
+
+static inline int syscall_get_nr(struct task_struct *task,
+				 struct pt_regs *regs)
+{
+	return task_thread_info(task)->syscall;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	regs->ARM_r0 = regs->ARM_ORIG_r0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+	unsigned long error = regs->ARM_r0;
+	return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->ARM_r0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	regs->ARM_r0 = (long) error ? error : val;
+}
+
+#define SYSCALL_MAX_ARGS 7
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	if (i + n > SYSCALL_MAX_ARGS) {
+		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
+		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
+		pr_warning("%s called with max args %d, handling only %d\n",
+			   __func__, i + n, SYSCALL_MAX_ARGS);
+		memset(args_bad, 0, n_bad * sizeof(args[0]));
+		n = SYSCALL_MAX_ARGS - i;
+	}
+
+	if (i == 0) {
+		args[0] = regs->ARM_ORIG_r0;
+		args++;
+		i++;
+		n--;
+	}
+
+	memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+	if (i + n > SYSCALL_MAX_ARGS) {
+		pr_warning("%s called with max args %d, handling only %d\n",
+			   __func__, i + n, SYSCALL_MAX_ARGS);
+		n = SYSCALL_MAX_ARGS - i;
+	}
+
+	if (i == 0) {
+		regs->ARM_ORIG_r0 = args[0];
+		args++;
+		i++;
+		n--;
+	}
+
+	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+}
+
+#endif /* _ASM_ARM_SYSCALL_H */
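These helpers give seccomp, ptrace users and the syscall tracepoints a uniform view of ARM's convention, including the quirk that argument 0 must be read from ARM_ORIG_r0 because ARM_r0 is overwritten by the return value. A sketch of a consumer (the hook itself is hypothetical):

	/* Sketch: log a syscall's number and first three arguments
	 * from some tracing hook. */
	static void trace_syscall_entry(struct task_struct *task,
					struct pt_regs *regs)
	{
		unsigned long args[3];

		syscall_get_arguments(task, regs, 0, 3, args);
		pr_debug("sys %d (%lx, %lx, %lx)\n",
			 syscall_get_nr(task, regs),
			 args[0], args[1], args[2]);
	}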
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 0f04d84582e1..68388eb4946b 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -153,6 +153,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
+#define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 85fe61e73202..6e924d3a77eb 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -65,21 +65,6 @@
 #define MULTI_TLB 1
 #endif
 
-#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)
-
-#ifdef CONFIG_CPU_TLB_V3
-# define v3_possible_flags	v3_tlb_flags
-# define v3_always_flags	v3_tlb_flags
-# ifdef _TLB
-#  define MULTI_TLB 1
-# else
-#  define _TLB v3
-# endif
-#else
-# define v3_possible_flags	0
-# define v3_always_flags	(-1UL)
-#endif
-
 #define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)
 
 #ifdef CONFIG_CPU_TLB_V4WT
@@ -298,8 +283,7 @@ extern struct cpu_tlb_fns cpu_tlb;
  * implemented the "%?" method, but this has been discontinued due to too
  * many people getting it wrong.
  */
-#define possible_tlb_flags	(v3_possible_flags | \
-				 v4_possible_flags | \
+#define possible_tlb_flags	(v4_possible_flags | \
 				 v4wbi_possible_flags | \
 				 fr_possible_flags | \
 				 v4wb_possible_flags | \
@@ -307,8 +291,7 @@ extern struct cpu_tlb_fns cpu_tlb;
 				 v6wbi_possible_flags | \
 				 v7wbi_possible_flags)
 
-#define always_tlb_flags	(v3_always_flags & \
-				 v4_always_flags & \
+#define always_tlb_flags	(v4_always_flags & \
 				 v4wbi_always_flags & \
 				 fr_always_flags & \
 				 v4wb_always_flags & \