Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/arch_timer.h      |   3
-rw-r--r--  arch/arm/include/asm/cacheflush.h      |   8
-rw-r--r--  arch/arm/include/asm/delay.h           |  32
-rw-r--r--  arch/arm/include/asm/dma-mapping.h     |  24
-rw-r--r--  arch/arm/include/asm/kmap_types.h      |  26
-rw-r--r--  arch/arm/include/asm/locks.h           | 274
-rw-r--r--  arch/arm/include/asm/mach/irq.h        |   2
-rw-r--r--  arch/arm/include/asm/memory.h          |   2
-rw-r--r--  arch/arm/include/asm/mutex.h           | 119
-rw-r--r--  arch/arm/include/asm/perf_event.h      |  17
-rw-r--r--  arch/arm/include/asm/pgtable.h         |  40
-rw-r--r--  arch/arm/include/asm/pmu.h             |   3
-rw-r--r--  arch/arm/include/asm/sched_clock.h     |   2
-rw-r--r--  arch/arm/include/asm/setup.h           |   4
-rw-r--r--  arch/arm/include/asm/spinlock.h        |  76
-rw-r--r--  arch/arm/include/asm/spinlock_types.h  |  17
-rw-r--r--  arch/arm/include/asm/timex.h           |  10
-rw-r--r--  arch/arm/include/asm/uaccess.h         |  27
-rw-r--r--  arch/arm/include/asm/unistd.h          |   1
-rw-r--r--  arch/arm/include/asm/word-at-a-time.h  |  96
20 files changed, 245 insertions, 538 deletions
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index ed2e95d46e29..62e75475e57e 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,7 +1,10 @@
 #ifndef __ASMARM_ARCH_TIMER_H
 #define __ASMARM_ARCH_TIMER_H
 
+#include <asm/errno.h>
+
 #ifdef CONFIG_ARM_ARCH_TIMER
+#define ARCH_HAS_READ_CURRENT_TIMER
 int arch_timer_of_register(void);
 int arch_timer_sched_clock_init(void);
 #else
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 004c1bc95d2b..e4448e16046d 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 static inline void
 vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 					vma->vm_flags);
 }
@@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 static inline void
 vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b2deda181549..dc6145120de3 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -6,9 +6,22 @@
 #ifndef __ASM_ARM_DELAY_H
 #define __ASM_ARM_DELAY_H
 
+#include <asm/memory.h>
 #include <asm/param.h>	/* HZ */
 
-extern void __delay(int loops);
+#define MAX_UDELAY_MS	2
+#define UDELAY_MULT	((UL(2199023) * HZ) >> 11)
+#define UDELAY_SHIFT	30
+
+#ifndef __ASSEMBLY__
+
+extern struct arm_delay_ops {
+	void (*delay)(unsigned long);
+	void (*const_udelay)(unsigned long);
+	void (*udelay)(unsigned long);
+} arm_delay_ops;
+
+#define __delay(n)	arm_delay_ops.delay(n)
 
 /*
  * This function intentionally does not exist; if you see references to
@@ -23,22 +36,27 @@ extern void __bad_udelay(void);
  * division by multiplication: you don't have to worry about
  * loss of precision.
  *
- * Use only for very small delays ( < 1 msec). Should probably use a
+ * Use only for very small delays ( < 2 msec). Should probably use a
  * lookup table, really, as the multiplications take much too long with
  * short delays. This is a "reasonable" implementation, though (and the
  * first constant multiplications gets optimized away if the delay is
  * a constant)
  */
-extern void __udelay(unsigned long usecs);
-extern void __const_udelay(unsigned long);
-
-#define MAX_UDELAY_MS	2
+#define __udelay(n)		arm_delay_ops.udelay(n)
+#define __const_udelay(n)	arm_delay_ops.const_udelay(n)
 
 #define udelay(n)						\
 	(__builtin_constant_p(n) ?				\
 	  ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() :	\
-			__const_udelay((n) * ((2199023U*HZ)>>11))) :	\
+			__const_udelay((n) * UDELAY_MULT)) :	\
 	  __udelay(n))
 
+/* Loop-based definitions for assembly code. */
+extern void __loop_delay(unsigned long loops);
+extern void __loop_udelay(unsigned long usecs);
+extern void __loop_const_udelay(unsigned long);
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* defined(_ARM_DELAY_H) */
 
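A quick sanity check on the new constants (assuming the usual ARM __const_udelay() implementation in arch/arm/lib/delay.c, which multiplies by loops_per_jiffy and shifts right by UDELAY_SHIFT; that code is outside this header): 2199023 is roughly 2^41 / 10^6, so UDELAY_MULT = (2199023 * HZ) >> 11 is roughly HZ * 2^30 / 10^6. For a constant n, udelay(n) therefore hands __const_udelay() the value n * HZ * 2^30 / 10^6; after the multiply by loops_per_jiffy and the shift right by UDELAY_SHIFT = 30, that leaves n * loops_per_jiffy * HZ / 10^6 delay loops, i.e. exactly n microseconds' worth, since loops_per_jiffy * HZ is the number of delay loops per second.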
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index bbef15d04890..2ae842df4551 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 				  void *cpu_addr, dma_addr_t dma_addr, size_t size,
 				  struct dma_attrs *attrs);
 
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
-				  void *cpu_addr, dma_addr_t dma_addr,
-				  size_t size, struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
 static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag)
 {
@@ -213,20 +202,12 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	DEFINE_DMA_ATTRS(attrs);
-	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
  * memory allocator is initialised, i.e. before any core_initcall.
  */
-extern void __init init_consistent_dma_size(unsigned long size);
+static inline void init_consistent_dma_size(unsigned long size) { }
 
 /*
  * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
@@ -280,6 +261,9 @@ extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
 extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
+extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		struct dma_attrs *attrs);
 
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index e51b1e81df05..83eb2f772911 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,30 +4,6 @@
 /*
  * This is the "bare minimum". AIO seems to require this.
  */
-enum km_type {
-	KM_BOUNCE_READ,
-	KM_SKB_SUNRPC_DATA,
-	KM_SKB_DATA_SOFTIRQ,
-	KM_USER0,
-	KM_USER1,
-	KM_BIO_SRC_IRQ,
-	KM_BIO_DST_IRQ,
-	KM_PTE0,
-	KM_PTE1,
-	KM_IRQ0,
-	KM_IRQ1,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,
-	KM_L1_CACHE,
-	KM_L2_CACHE,
-	KM_KDB,
-	KM_TYPE_NR
-};
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define KM_NMI		(-1)
-#define KM_NMI_PTE	(-1)
-#define KM_IRQ_PTE	(-1)
-#endif
+#define KM_TYPE_NR 16
 
 #endif
diff --git a/arch/arm/include/asm/locks.h b/arch/arm/include/asm/locks.h
deleted file mode 100644
index ef4c897772d1..000000000000
--- a/arch/arm/include/asm/locks.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * arch/arm/include/asm/locks.h
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Interrupt safe locking assembler.
- */
-#ifndef __ASM_PROC_LOCKS_H
-#define __ASM_PROC_LOCKS_H
-
-#if __LINUX_ARM_ARCH__ >= 6
-
-#define __down_op(ptr,fail)		\
-	({				\
-	__asm__ __volatile__(		\
-	"@ down_op\n"			\
-"1:	ldrex	lr, [%0]\n"		\
-"	sub	lr, lr, %1\n"		\
-"	strex	ip, lr, [%0]\n"		\
-"	teq	ip, #0\n"		\
-"	bne	1b\n"			\
-"	teq	lr, #0\n"		\
-"	movmi	ip, %0\n"		\
-"	blmi	" #fail			\
-	:				\
-	: "r" (ptr), "I" (1)		\
-	: "ip", "lr", "cc");		\
-	smp_mb();			\
-	})
-
-#define __down_op_ret(ptr,fail)		\
-	({				\
-		unsigned int ret;	\
-	__asm__ __volatile__(		\
-	"@ down_op_ret\n"		\
-"1:	ldrex	lr, [%1]\n"		\
-"	sub	lr, lr, %2\n"		\
-"	strex	ip, lr, [%1]\n"		\
-"	teq	ip, #0\n"		\
-"	bne	1b\n"			\
-"	teq	lr, #0\n"		\
-"	movmi	ip, %1\n"		\
-"	movpl	ip, #0\n"		\
-"	blmi	" #fail "\n"		\
-"	mov	%0, ip"			\
-	: "=&r" (ret)			\
-	: "r" (ptr), "I" (1)		\
-	: "ip", "lr", "cc");		\
-	smp_mb();			\
-	ret;				\
-	})
-
-#define __up_op(ptr,wake)		\
-	({				\
-	smp_mb();			\
-	__asm__ __volatile__(		\
-	"@ up_op\n"			\
-"1:	ldrex	lr, [%0]\n"		\
-"	add	lr, lr, %1\n"		\
-"	strex	ip, lr, [%0]\n"		\
-"	teq	ip, #0\n"		\
-"	bne	1b\n"			\
-"	cmp	lr, #0\n"		\
-"	movle	ip, %0\n"		\
-"	blle	" #wake			\
-	:				\
-	: "r" (ptr), "I" (1)		\
-	: "ip", "lr", "cc");		\
-	})
-
-/*
- * The value 0x01000000 supports up to 128 processors and
- * lots of processes. BIAS must be chosen such that sub'ing
- * BIAS once per CPU will result in the long remaining
- * negative.
- */
-#define RW_LOCK_BIAS      0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-#define __down_op_write(ptr,fail)	\
-	({				\
-	__asm__ __volatile__(		\
-	"@ down_op_write\n"		\
-"1:	ldrex	lr, [%0]\n"		\
-"	sub	lr, lr, %1\n"		\
-"	strex	ip, lr, [%0]\n"		\
-"	teq	ip, #0\n"		\
-"	bne	1b\n"			\
-"	teq	lr, #0\n"		\
-"	movne	ip, %0\n"		\
-"	blne	" #fail			\
-	:				\
-	: "r" (ptr), "I" (RW_LOCK_BIAS)	\
-	: "ip", "lr", "cc");		\
-	smp_mb();			\
-	})
-
-#define __up_op_write(ptr,wake)		\
-	({				\
-	smp_mb();			\
-	__asm__ __volatile__(		\
-	"@ up_op_write\n"		\
-"1:	ldrex	lr, [%0]\n"		\
-"	adds	lr, lr, %1\n"		\
-"	strex	ip, lr, [%0]\n"		\
-"	teq	ip, #0\n"		\
-"	bne	1b\n"			\
-"	movcs	ip, %0\n"		\
-"	blcs	" #wake			\
-	:				\
-	: "r" (ptr), "I" (RW_LOCK_BIAS)	\
-	: "ip", "lr", "cc");		\
-	})
-
-#define __down_op_read(ptr,fail)	\
-	__down_op(ptr, fail)
-
-#define __up_op_read(ptr,wake)		\
-	({				\
-	smp_mb();			\
-	__asm__ __volatile__(		\
-	"@ up_op_read\n"		\
-"1:	ldrex	lr, [%0]\n"		\
-"	add	lr, lr, %1\n"		\
-"	strex	ip, lr, [%0]\n"		\
-"	teq	ip, #0\n"		\
-"	bne	1b\n"			\
-"	teq	lr, #0\n"		\
-"	moveq	ip, %0\n"		\
-"	bleq	" #wake			\
-	:				\
-	: "r" (ptr), "I" (1)		\
-	: "ip", "lr", "cc");		\
-	})
-
-#else
-
-#define __down_op(ptr,fail)		\
-	({				\
-	__asm__ __volatile__(		\
-	"@ down_op\n"			\
-"	mrs	ip, cpsr\n"		\
-"	orr	lr, ip, #128\n"		\
-"	msr	cpsr_c, lr\n"		\
-"	ldr	lr, [%0]\n"		\
-"	subs	lr, lr, %1\n"		\
-"	str	lr, [%0]\n"		\
-"	msr	cpsr_c, ip\n"		\
-"	movmi	ip, %0\n"		\
-"	blmi	" #fail			\
-	:				\
-	: "r" (ptr), "I" (1)		\
-	: "ip", "lr", "cc");		\
-	smp_mb();			\
-	})
-
-#define __down_op_ret(ptr,fail)		\
-	({				\
-		unsigned int ret;	\
-	__asm__ __volatile__(		\
-	"@ down_op_ret\n"		\
-"	mrs	ip, cpsr\n"		\
-"	orr	lr, ip, #128\n"		\
-"	msr	cpsr_c, lr\n"		\
-"	ldr	lr, [%1]\n"		\
-"	subs	lr, lr, %2\n"		\
-"	str	lr, [%1]\n"		\
-"	msr	cpsr_c, ip\n"		\
-"	movmi	ip, %1\n"		\
-"	movpl	ip, #0\n"		\
-"	blmi	" #fail "\n"		\
-"	mov	%0, ip"			\
-	: "=&r" (ret)			\
-	: "r" (ptr), "I" (1)		\
-	: "ip", "lr", "cc");		\
-	smp_mb();			\
-	ret;				\
-	})
-
-#define __up_op(ptr,wake)		\
-	({				\
-	smp_mb();			\
-	__asm__ __volatile__(		\
-	"@ up_op\n"			\
-"	mrs	ip, cpsr\n"		\
-"	orr	lr, ip, #128\n"		\
-"	msr	cpsr_c, lr\n"		\
-"	ldr	lr, [%0]\n"		\
-"	adds	lr, lr, %1\n"		\
-"	str	lr, [%0]\n"		\
-"	msr	cpsr_c, ip\n"		\
-"	movle	ip, %0\n"		\
-"	blle	" #wake			\
-	:				\
-	: "r" (ptr), "I" (1)		\
-	: "ip", "lr", "cc");		\
-	})
-
-/*
- * The value 0x01000000 supports up to 128 processors and
- * lots of processes. BIAS must be chosen such that sub'ing
- * BIAS once per CPU will result in the long remaining
- * negative.
- */
-#define RW_LOCK_BIAS      0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-#define __down_op_write(ptr,fail)	\
-	({				\
-	__asm__ __volatile__(		\
-	"@ down_op_write\n"		\
-"	mrs	ip, cpsr\n"		\
-"	orr	lr, ip, #128\n"		\
-"	msr	cpsr_c, lr\n"		\
-"	ldr	lr, [%0]\n"		\
-"	subs	lr, lr, %1\n"		\
-"	str	lr, [%0]\n"		\
-"	msr	cpsr_c, ip\n"		\
-"	movne	ip, %0\n"		\
-"	blne	" #fail			\
-	:				\
-	: "r" (ptr), "I" (RW_LOCK_BIAS)	\
-	: "ip", "lr", "cc");		\
-	smp_mb();			\
-	})
-
-#define __up_op_write(ptr,wake)		\
-	({				\
-	__asm__ __volatile__(		\
-	"@ up_op_write\n"		\
-"	mrs	ip, cpsr\n"		\
-"	orr	lr, ip, #128\n"		\
-"	msr	cpsr_c, lr\n"		\
-"	ldr	lr, [%0]\n"		\
-"	adds	lr, lr, %1\n"		\
-"	str	lr, [%0]\n"		\
-"	msr	cpsr_c, ip\n"		\
-"	movcs	ip, %0\n"		\
-"	blcs	" #wake			\
-	:				\
-	: "r" (ptr), "I" (RW_LOCK_BIAS)	\
-	: "ip", "lr", "cc");		\
-	smp_mb();			\
-	})
-
-#define __down_op_read(ptr,fail)	\
-	__down_op(ptr, fail)
-
-#define __up_op_read(ptr,wake)		\
-	({				\
-	smp_mb();			\
-	__asm__ __volatile__(		\
-	"@ up_op_read\n"		\
-"	mrs	ip, cpsr\n"		\
-"	orr	lr, ip, #128\n"		\
-"	msr	cpsr_c, lr\n"		\
-"	ldr	lr, [%0]\n"		\
-"	adds	lr, lr, %1\n"		\
-"	str	lr, [%0]\n"		\
-"	msr	cpsr_c, ip\n"		\
-"	moveq	ip, %0\n"		\
-"	bleq	" #wake			\
-	:				\
-	: "r" (ptr), "I" (1)		\
-	: "ip", "lr", "cc");		\
-	})
-
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index febe495d0c6e..15cb035309f7 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -17,7 +17,7 @@ struct seq_file;
 /*
  * This is internal. Do not use it.
  */
-extern void init_FIQ(void);
+extern void init_FIQ(int);
 extern int show_fiq_list(struct seq_file *, int);
 
 #ifdef CONFIG_MULTI_IRQ_HANDLER
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index fcb575747e5e..e965f1b560f1 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -16,7 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/const.h>
 #include <linux/types.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 
 #ifdef CONFIG_NEED_MACH_MEMORY_H
 #include <mach/memory.h>
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
index 93226cf23ae0..b1479fd04a95 100644
--- a/arch/arm/include/asm/mutex.h
+++ b/arch/arm/include/asm/mutex.h
@@ -7,121 +7,10 @@
  */
 #ifndef _ASM_MUTEX_H
 #define _ASM_MUTEX_H
-
-#if __LINUX_ARM_ARCH__ < 6
-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
-# include <asm-generic/mutex-xchg.h>
-#else
-
 /*
- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
- * atomic decrement (it is not a reliable atomic decrement but it satisfies
- * the defined semantics for our purpose, while being smaller and faster
- * than a real atomic decrement or atomic swap. The idea is to attempt
- * decrementing the lock value only once. If once decremented it isn't zero,
- * or if its store-back fails due to a dispute on the exclusive store, we
- * simply bail out immediately through the slow path where the lock will be
- * reattempted until it succeeds.
+ * On pre-ARMv6 hardware this results in a swp-based implementation,
+ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
+ * accesses instead.
  */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
-	int __ex_flag, __res;
-
-	__asm__ (
-
-		"ldrex	%0, [%2]	\n\t"
-		"sub	%0, %0, #1	\n\t"
-		"strex	%1, %0, [%2]	"
-
-		: "=&r" (__res), "=&r" (__ex_flag)
-		: "r" (&(count)->counter)
-		: "cc","memory" );
-
-	__res |= __ex_flag;
-	if (unlikely(__res != 0))
-		fail_fn(count);
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
-	int __ex_flag, __res;
-
-	__asm__ (
-
-		"ldrex	%0, [%2]	\n\t"
-		"sub	%0, %0, #1	\n\t"
-		"strex	%1, %0, [%2]	"
-
-		: "=&r" (__res), "=&r" (__ex_flag)
-		: "r" (&(count)->counter)
-		: "cc","memory" );
-
-	__res |= __ex_flag;
-	if (unlikely(__res != 0))
-		__res = fail_fn(count);
-	return __res;
-}
-
-/*
- * Same trick is used for the unlock fast path. However the original value,
- * rather than the result, is used to test for success in order to have
- * better generated assembly.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
-	int __ex_flag, __res, __orig;
-
-	__asm__ (
-
-		"ldrex	%0, [%3]	\n\t"
-		"add	%1, %0, #1	\n\t"
-		"strex	%2, %1, [%3]	"
-
-		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
-		: "r" (&(count)->counter)
-		: "cc","memory" );
-
-	__orig |= __ex_flag;
-	if (unlikely(__orig != 0))
-		fail_fn(count);
-}
-
-/*
- * If the unlock was done on a contended lock, or if the unlock simply fails
- * then the mutex remains locked.
- */
-#define __mutex_slowpath_needs_to_unlock()	1
-
-/*
- * For __mutex_fastpath_trylock we use another construct which could be
- * described as a "single value cmpxchg".
- *
- * This provides the needed trylock semantics like cmpxchg would, but it is
- * lighter and less generic than a true cmpxchg implementation.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
-	int __ex_flag, __res, __orig;
-
-	__asm__ (
-
-		"1: ldrex	%0, [%3]	\n\t"
-		"subs	%1, %0, #1	\n\t"
-		"strexeq	%2, %1, [%3]	\n\t"
-		"movlt	%0, #0	\n\t"
-		"cmpeq	%2, #0	\n\t"
-		"bgt	1b	"
-
-		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
-		: "r" (&count->counter)
-		: "cc", "memory" );
-
-	return __orig;
-}
-
-#endif
+#include <asm-generic/mutex-xchg.h>
 #endif
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 00cbe10a50e3..e074948d8143 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,21 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/* ARM perf PMU IDs for use by internal perf clients. */
-enum arm_perf_pmu_ids {
-	ARM_PERF_PMU_ID_XSCALE1	= 0,
-	ARM_PERF_PMU_ID_XSCALE2,
-	ARM_PERF_PMU_ID_V6,
-	ARM_PERF_PMU_ID_V6MP,
-	ARM_PERF_PMU_ID_CA8,
-	ARM_PERF_PMU_ID_CA9,
-	ARM_PERF_PMU_ID_CA5,
-	ARM_PERF_PMU_ID_CA15,
-	ARM_PERF_PMU_ID_CA7,
-	ARM_NUM_PMU_IDS,
-};
-
-extern enum arm_perf_pmu_ids
-armpmu_get_pmu_id(void);
+/* Nothing to see here... */
 
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f66626d71e7d..41dc31f834c3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 
+#define pte_none(pte)		(!pte_val(pte))
+#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
+#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
+#define pte_special(pte)	(0)
+
+#define pte_present_user(pte) \
+	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+	 (L_PTE_PRESENT | L_PTE_USER))
+
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
 {
@@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pteval)
 {
-	if (addr >= TASK_SIZE)
-		set_pte_ext(ptep, pteval, 0);
-	else {
+	unsigned long ext = 0;
+
+	if (addr < TASK_SIZE && pte_present_user(pteval)) {
 		__sync_icache_dcache(pteval);
-		set_pte_ext(ptep, pteval, PTE_EXT_NG);
+		ext |= PTE_EXT_NG;
 	}
-}
 
-#define pte_none(pte)		(!pte_val(pte))
-#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
-#define pte_special(pte)	(0)
-
-#define pte_present_user(pte) \
-	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-	 (L_PTE_PRESENT | L_PTE_USER))
+	set_pte_ext(ptep, pteval, ext);
+}
 
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  *
  *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
  *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- *   <--------------- offset --------------------> <- type --> 0 0 0
+ *   <--------------- offset ----------------------> < type -> 0 0 0
  *
- * This gives us up to 63 swap files and 32GB per swap file. Note that
+ * This gives us up to 31 swap files and 64GB per swap file. Note that
  * the offset field is always non-zero.
  */
 #define __SWP_TYPE_SHIFT	3
-#define __SWP_TYPE_BITS		6
+#define __SWP_TYPE_BITS		5
 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
 
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 90114faa9f3c..4432305f4a2a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -103,10 +103,9 @@ struct pmu_hw_events {
 
 struct arm_pmu {
 	struct pmu	pmu;
-	enum arm_perf_pmu_ids id;
 	enum arm_pmu_type type;
 	cpumask_t	active_irqs;
-	const char	*name;
+	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	void		(*enable)(struct hw_perf_event *evt, int idx);
 	void		(*disable)(struct hw_perf_event *evt, int idx);
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index e3f757263438..05b8e82ec9f5 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -10,5 +10,7 @@
 
 extern void sched_clock_postinit(void);
 extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
+extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate);
 
 #endif
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 23ebc0c82a39..24d284a1bfc7 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -196,7 +196,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn }
 
 struct membank {
 	phys_addr_t start;
-	unsigned long size;
+	phys_addr_t size;
 	unsigned int highmem;
 };
 
@@ -217,7 +217,7 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
 #define bank_phys_size(bank)	(bank)->size
 
-extern int arm_add_memory(phys_addr_t start, unsigned long size);
+extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);
 
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c88095c..b4ca707d0a69 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -59,18 +59,13 @@ static inline void dsb_sev(void)
 }
 
 /*
- * ARMv6 Spin-locking.
+ * ARMv6 ticket-based spin-locking.
  *
- * We exclusively read the old value. If it is zero, we may have
- * won the lock, so we try exclusively storing it. A memory barrier
- * is required after we get a lock, and before we release it, because
- * V6 CPUs are assumed to have weakly ordered memory.
- *
- * Unlocked value: 0
- * Locked value: 1
+ * A memory barrier is required after we get a lock, and before we
+ * release it, because V6 CPUs are assumed to have weakly ordered
+ * memory.
  */
 
-#define arch_spin_is_locked(x)		((x)->lock != 0)
 #define arch_spin_unlock_wait(lock) \
 	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
@@ -79,31 +74,39 @@ static inline void dsb_sev(void)
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 newval;
+	arch_spinlock_t lockval;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-	WFE("ne")
-"	strexeq	%0, %2, [%1]\n"
-"	teqeq	%0, #0\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%1, %0, %4\n"
+"	strex	%2, %1, [%3]\n"
+"	teq	%2, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
+	while (lockval.tickets.next != lockval.tickets.owner) {
+		wfe();
+		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+	}
+
 	smp_mb();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 slock;
 
 	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+"	ldrex	%0, [%2]\n"
+"	subs	%1, %0, %0, ror #16\n"
+"	addeq	%0, %0, %3\n"
+"	strexeq	%1, %0, [%2]"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
 	if (tmp == 0) {
@@ -116,17 +119,38 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	unsigned long tmp;
+	u32 slock;
+
 	smp_mb();
 
 	__asm__ __volatile__(
-"	str	%1, [%0]\n"
-	:
-	: "r" (&lock->lock), "r" (0)
+"	mov	%1, #1\n"
+"1:	ldrex	%0, [%2]\n"
+"	uadd16	%0, %0, %1\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock)
 	: "cc");
 
 	dsb_sev();
 }
 
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return tickets.owner != tickets.next;
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended	arch_spin_is_contended
+
 /*
  * RWLOCKS
  *
@@ -158,7 +182,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	unsigned long tmp;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
+"	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
@@ -244,7 +268,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	unsigned long tmp, tmp2 = 1;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%2]\n"
+"	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
 	: "=&r" (tmp), "+r" (tmp2)
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index d14d197ae04a..b262d2f8b478 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -5,11 +5,24 @@
 # error "please don't include this file directly"
 #endif
 
+#define TICKET_SHIFT	16
+
 typedef struct {
-	volatile unsigned int lock;
+	union {
+		u32 slock;
+		struct __raw_tickets {
+#ifdef __ARMEB__
+			u16 next;
+			u16 owner;
+#else
+			u16 owner;
+			u16 next;
+#endif
+		} tickets;
+	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 typedef struct {
 	volatile unsigned int lock;
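For readers unfamiliar with ticket locks, the next/owner scheme used by arch_spin_lock()/arch_spin_unlock() above can be sketched in portable C11 atomics. This is only a model of the algorithm, not the kernel's ldrex/strex/wfe implementation, and the names below are invented for the example:

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
	_Atomic uint32_t slock;	/* arithmetic low half: owner, high half: next */
};

static void ticket_lock(struct ticket_lock *l)
{
	/* Take a ticket: bump "next" (cf. the add of 1 << TICKET_SHIFT above). */
	uint32_t old = atomic_fetch_add_explicit(&l->slock, 1u << 16,
						 memory_order_acquire);
	uint16_t ticket = (uint16_t)(old >> 16);

	/* Spin until "owner" reaches our ticket; the kernel waits in wfe(). */
	while ((uint16_t)atomic_load_explicit(&l->slock, memory_order_acquire) != ticket)
		;
}

static void ticket_unlock(struct ticket_lock *l)
{
	uint32_t old = atomic_load_explicit(&l->slock, memory_order_relaxed);
	uint32_t new;

	/* Bump only the owner halfword, as uadd16 does (no carry into "next"). */
	do {
		new = (old & 0xffff0000u) | ((old + 1) & 0xffffu);
	} while (!atomic_compare_exchange_weak_explicit(&l->slock, &old, new,
							memory_order_release,
							memory_order_relaxed));
}

arch_spin_is_locked() and arch_spin_is_contended() then reduce to comparing the two halves, which is what the tickets union in the new arch_spinlock_t makes convenient.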
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 3be8de3adaba..ce119442277c 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -12,13 +12,15 @@
 #ifndef _ASMARM_TIMEX_H
 #define _ASMARM_TIMEX_H
 
+#include <asm/arch_timer.h>
 #include <mach/timex.h>
 
 typedef unsigned long cycles_t;
 
-static inline cycles_t get_cycles (void)
-{
-	return 0;
-}
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+#define get_cycles()	({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+#else
+#define get_cycles()	(0)
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 71f6536d17ac..479a6352e0b5 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -189,6 +189,9 @@ static inline void set_fs(mm_segment_t fs)
 
 #define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
 
+#define user_addr_max() \
+	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
 /*
  * The "__xxx" versions of the user access functions do not verify the
  * address space - it must have been done previously with a separate
@@ -398,9 +401,6 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
 #define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
 #endif
 
-extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n))
@@ -427,24 +427,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
 	return n;
 }
 
-static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		res = __strncpy_from_user(dst, src, count);
-	return res;
-}
-
-#define strlen_user(s)	strnlen_user(s, ~0UL >> 1)
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
-static inline long __must_check strnlen_user(const char __user *s, long n)
-{
-	unsigned long res = 0;
-
-	if (__addr_ok(s))
-		res = __strnlen_user(s, n);
-
-	return res;
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 #endif /* _ASMARM_UACCESS_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 512cd1473454..0cab47d4a83f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -446,7 +446,6 @@
 
 #ifdef __KERNEL__
 
-#define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..4d52f92967a6
--- /dev/null
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -0,0 +1,96 @@
+#ifndef __ASM_ARM_WORD_AT_A_TIME_H
+#define __ASM_ARM_WORD_AT_A_TIME_H
+
+#ifndef __ARMEB__
+
+/*
+ * Little-endian word-at-a-time zero byte handling.
+ * Heavily based on the x86 algorithm.
+ */
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+				     const struct word_at_a_time *c)
+{
+	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+	*bits = mask;
+	return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	bits = (bits - 1) & ~bits;
+	return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	unsigned long ret;
+
+#if __LINUX_ARM_ARCH__ >= 5
+	/* We have clz available. */
+	ret = fls(mask) >> 3;
+#else
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	ret = (0x0ff0001 + mask) >> 23;
+	/* Fix the 1 for 00 case */
+	ret &= mask;
+#endif
+
+	return ret;
+}
+
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#define zero_bytemask(mask) (mask)
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+	unsigned long ret, offset;
+
+	/* Load word from unaligned pointer addr */
+	asm(
+	"1:	ldr	%0, [%2]\n"
+	"2:\n"
+	"	.pushsection .fixup,\"ax\"\n"
+	"	.align 2\n"
+	"3:	and	%1, %2, #0x3\n"
+	"	bic	%2, %2, #0x3\n"
+	"	ldr	%0, [%2]\n"
+	"	lsl	%1, %1, #0x3\n"
+	"	lsr	%0, %0, %1\n"
+	"	b	2b\n"
+	"	.popsection\n"
+	"	.pushsection __ex_table,\"a\"\n"
+	"	.align	3\n"
+	"	.long	1b, 3b\n"
+	"	.popsection"
+	: "=&r" (ret), "=&r" (offset)
+	: "r" (addr), "Qo" (*(unsigned long *)addr));
+
+	return ret;
+}
+
+
+#endif	/* DCACHE_WORD_ACCESS */
+
+#else	/* __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#endif /* __ASM_ARM_WORD_AT_A_TIME_H */
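As a rough userspace illustration of how these helpers are meant to be combined (not kernel code: it assumes a little-endian build like the !__ARMEB__ branch above, memcpy() stands in for an aligned word load, and the portable find_zero() below replaces the fls()-based one):

#include <stdio.h>
#include <string.h>

#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))	/* replicate byte x across a word */

struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

/* Same zero-byte test as has_zero() in the header above. */
static unsigned long has_zero(unsigned long a, unsigned long *bits,
			      const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}

static unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

/* Portable stand-in for the fls()-based find_zero(): count the 0xff bytes. */
static unsigned long find_zero(unsigned long mask)
{
	unsigned long ret = 0;

	while (mask & 0xff) {
		mask >>= 8;
		ret++;
	}
	return ret;
}

int main(void)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	char buf[32] = "hello, arm";	/* remainder is zero-filled */
	unsigned long word, bits, len = 0;

	for (;;) {
		/* memcpy stands in for an aligned word load from the string. */
		memcpy(&word, buf + len, sizeof(word));
		if (has_zero(word, &bits, &constants)) {
			len += find_zero(create_zero_mask(bits));
			break;
		}
		len += sizeof(word);
	}
	printf("word-at-a-time length: %lu, strlen: %zu\n", len, strlen(buf));
	return 0;
}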