Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/assembler.h            |  27
-rw-r--r--  arch/arm/include/asm/cacheflush.h           |  65
-rw-r--r--  arch/arm/include/asm/cachetype.h            |   8
-rw-r--r--  arch/arm/include/asm/dma-mapping.h          |   8
-rw-r--r--  arch/arm/include/asm/ftrace.h               |  20
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h   |  34
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h        | 133
-rw-r--r--  arch/arm/include/asm/module.h               |  31
-rw-r--r--  arch/arm/include/asm/perf_event.h           |   2
-rw-r--r--  arch/arm/include/asm/pgtable.h              |  30
-rw-r--r--  arch/arm/include/asm/processor.h            |   4
-rw-r--r--  arch/arm/include/asm/ptrace.h               |   2
-rw-r--r--  arch/arm/include/asm/smp_mpidr.h            |  17
-rw-r--r--  arch/arm/include/asm/smp_plat.h             |  25
-rw-r--r--  arch/arm/include/asm/system.h               |   6
-rw-r--r--  arch/arm/include/asm/tlbflush.h             |  36
-rw-r--r--  arch/arm/include/asm/unistd.h               |   3
17 files changed, 369 insertions, 82 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6e8f05c8a1c8..062b58c029ab 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -154,16 +154,39 @@
 	.long	9999b,9001f;			\
 	.popsection
 
+#ifdef CONFIG_SMP
+#define ALT_SMP(instr...)					\
+9998:	instr
+#define ALT_UP(instr...)					\
+	.pushsection ".alt.smp.init", "a"			;\
+	.long	9998b						;\
+	instr							;\
+	.popsection
+#define ALT_UP_B(label)						\
+	.equ	up_b_offset, label - 9998b			;\
+	.pushsection ".alt.smp.init", "a"			;\
+	.long	9998b						;\
+	b	. + up_b_offset					;\
+	.popsection
+#else
+#define ALT_SMP(instr...)
+#define ALT_UP(instr...) instr
+#define ALT_UP_B(label) b label
+#endif
+
 /*
  * SMP data memory barrier
  */
 	.macro	smp_dmb
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
-	dmb
+	ALT_SMP(dmb)
 #elif __LINUX_ARM_ARCH__ == 6
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
+#else
+#error Incompatible SMP platform
 #endif
+	ALT_UP(nop)
 #endif
 	.endm
 
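
The ALT_SMP()/ALT_UP() macros added above emit the SMP form of an instruction inline and record its uniprocessor replacement in the .alt.smp.init section. As a rough illustration of what each record holds, the C sketch below walks such a table and patches the UP instructions in; the structure and function names are invented here, and the kernel applies these records itself during early boot rather than with code like this.

/*
 * Illustrative sketch only: ALT_UP() emits a record pairing the address of
 * the SMP instruction (the 9998 label) with the 4-byte UP replacement.
 */
struct alt_smp_record {
	unsigned long	insn_addr;	/* .long 9998b */
	unsigned int	up_insn;	/* instruction emitted by ALT_UP() */
};

static void hypothetical_fixup_smp_on_up(struct alt_smp_record *rec,
					 struct alt_smp_record *end)
{
	for (; rec < end; rec++)
		*(unsigned int *)rec->insn_addr = rec->up_insn;
}
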
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4656a24058d2..3acd8fa25e34 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -137,10 +137,10 @@
 #endif
 
 /*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
  */
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
 
 /*
  * MM Cache Management
@@ -156,6 +156,12 @@
  * Please note that the implementation of these, and the required
  * effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
+ * flush_icache_all()
+ *
+ *	Unconditionally clean and invalidate the entire icache.
+ *	Currently only needed for cache-v6.S and cache-v7.S, see
+ *	__flush_icache_all for the generic implementation.
+ *
  * flush_kern_all()
  *
  *	Unconditionally clean and invalidate the entire cache.
@@ -206,6 +212,7 @@
  */
 
 struct cpu_cache_fns {
+	void (*flush_icache_all)(void);
 	void (*flush_kern_all)(void);
 	void (*flush_user_all)(void);
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -227,6 +234,7 @@ struct cpu_cache_fns {
 
 extern struct cpu_cache_fns cpu_cache;
 
+#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
 #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
@@ -246,6 +254,7 @@ extern struct cpu_cache_fns cpu_cache;
 
 #else
 
+#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
@@ -253,6 +262,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
+extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
@@ -291,6 +301,37 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 /*
  * Convert calls to our calling convention.
  */
+
+/* Invalidate I-cache */
+#define __flush_icache_all_generic()				\
+	asm("mcr	p15, 0, %0, c7, c5, 0"			\
+	    : : "r" (0));
+
+/* Invalidate I-cache inner shareable */
+#define __flush_icache_all_v7_smp()				\
+	asm("mcr	p15, 0, %0, c7, c1, 0"			\
+	    : : "r" (0));
+
+/*
+ * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
+ * will fall through to use __flush_icache_all_generic.
+ */
+#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) ||	\
+	defined(CONFIG_SMP_ON_UP)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+#define __flush_icache_preferred	__flush_icache_all_v7_smp
+#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#else
+#define __flush_icache_preferred	__flush_icache_all_generic
+#endif
+
+static inline void __flush_icache_all(void)
+{
+	__flush_icache_preferred();
+}
+
 #define flush_cache_all()		__cpuc_flush_kern_all()
 
 static inline void vivt_flush_cache_mm(struct mm_struct *mm)
@@ -366,21 +407,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __flush_icache_all(void)
-{
-#ifdef CONFIG_ARM_ERRATA_411920
-	extern void v6_icache_inval_all(void);
-	v6_icache_inval_all();
-#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
-	asm("mcr	p15, 0, %0, c7, c1, 0	@ invalidate I-cache inner shareable\n"
-	    :
-	    : "r" (0));
-#else
-	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
-	    :
-	    : "r" (0));
-#endif
-}
 static inline void flush_kernel_vmap_range(void *addr, int size)
 {
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
@@ -405,9 +431,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
 {
-	/* highmem pages are always flushed upon kunmap already */
-	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
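
The new flush_icache_all cache function and the rewritten __flush_icache_all() choose the cheapest correct I-cache invalidate for the build: the inner-shareable operation on ARMv7 SMP, the generic c7, c5, 0 operation otherwise, or the per-CPU cache function when the choice has to be made at run time (multiple cache types, SMP_ON_UP, or the ARMv6 erratum workaround). A minimal sketch of the pattern this maintenance serves; the helper name is invented here.

/*
 * Hypothetical helper: after the kernel writes instructions into memory,
 * clean the D-cache for that range, then invalidate the I-cache so the new
 * code cannot be shadowed by stale instruction-cache lines.
 */
static void hypothetical_sync_new_code(void *addr, size_t size)
{
	__cpuc_flush_dcache_area(addr, size);
	__flush_icache_all();
}
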
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index d3a4c2cb9f2f..c023db09fcc1 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -6,6 +6,7 @@
 #define CACHEID_VIPT_ALIASING	(1 << 2)
 #define CACHEID_VIPT		(CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
 #define CACHEID_ASID_TAGGED	(1 << 3)
+#define CACHEID_VIPT_I_ALIASING	(1 << 4)
 
 extern unsigned int cacheid;
11 12
@@ -14,15 +15,18 @@ extern unsigned int cacheid;
 #define cache_is_vipt_nonaliasing()	cacheid_is(CACHEID_VIPT_NONALIASING)
 #define cache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_ALIASING)
 #define icache_is_vivt_asid_tagged()	cacheid_is(CACHEID_ASID_TAGGED)
+#define icache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_I_ALIASING)
 
 /*
  * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
  * Mask out support which will never be present on newer CPUs.
  * - v6+ is never VIVT
- * - v7+ VIPT never aliases
+ * - v7+ VIPT never aliases on D-side
  */
 #if __LINUX_ARM_ARCH__ >= 7
-#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
+#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING |\
+				 CACHEID_ASID_TAGGED |\
+				 CACHEID_VIPT_I_ALIASING)
 #elif __LINUX_ARM_ARCH__ >= 6
 #define	__CACHEID_ARCH_MIN	(~CACHEID_VIVT)
 #else
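
CACHEID_VIPT_I_ALIASING records an I-cache that can still alias even when the D-side is VIPT non-aliasing, which remains possible on ARMv7; __CACHEID_ARCH_MIN therefore keeps that bit while masking out cache types the architecture level rules out. The sketch below shows the masking idea only and is not the header's exact cacheid_is() implementation.

/*
 * Illustrative test: bits cleared in __CACHEID_ARCH_MIN can never be set
 * for this build, so the compiler folds the whole test to 0, while bits
 * such as CACHEID_VIPT_I_ALIASING stay a run-time check of 'cacheid'.
 */
static inline int hypothetical_cacheid_is(unsigned int mask)
{
	return (cacheid & __CACHEID_ARCH_MIN & mask) != 0;
}
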
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c226fe10553e..c568da7dcae4 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
  * DMA access and 1 if the buffer needs to be bounced.
  *
  */
-#ifdef CONFIG_SA1111
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#else
-static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
-				    size_t size)
-{
-	return 0;
-}
-#endif
 
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 103f7ee97313..f89515adac60 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -2,12 +2,30 @@
 #define _ASM_ARM_FTRACE
 
 #ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR		((long)(mcount))
+#define MCOUNT_ADDR		((unsigned long)(__gnu_mcount_nc))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 extern void __gnu_mcount_nc(void);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+struct dyn_arch_ftrace {
+#ifdef CONFIG_OLD_MCOUNT
+	bool	old_mcount;
+#endif
+};
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/* With Thumb-2, the recorded addresses have the lsb set */
+	return addr & ~1;
+}
+
+extern void ftrace_caller_old(void);
+extern void ftrace_call_old(void);
+#endif
+
 #endif
 
 #endif
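
ftrace_call_adjust() strips bit 0 because, when the kernel is built for Thumb-2, the addresses recorded in the mcount location table carry the Thumb state bit, while the patching code needs the plain instruction address. A small standalone illustration of the masking; the sample address is made up.

#include <stdio.h>

int main(void)
{
	unsigned long recorded = 0xc0108765UL;	/* lsb set: Thumb-2 entry */
	unsigned long insn = recorded & ~1UL;	/* as ftrace_call_adjust() does */

	printf("recorded %#lx -> instruction address %#lx\n", recorded, insn);
	return 0;
}
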
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 212e47828c79..7ecd793b8f5a 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -21,18 +21,6 @@
 #define TRACER_RUNNING		BIT(TRACER_RUNNING_BIT)
 #define TRACER_CYCLE_ACC	BIT(TRACER_CYCLE_ACC_BIT)
 
-struct tracectx {
-	unsigned int	etb_bufsz;
-	void __iomem	*etb_regs;
-	void __iomem	*etm_regs;
-	unsigned long	flags;
-	int		ncmppairs;
-	int		etm_portsz;
-	struct device	*dev;
-	struct clk	*emu_clk;
-	struct mutex	mutex;
-};
-
 #define TRACER_TIMEOUT 10000
 
 #define etm_writel(t, v, x) \
38#define etm_writel(t, v, x) \ 26#define etm_writel(t, v, x) \
@@ -112,10 +100,10 @@ struct tracectx {
 
 /* ETM status register, "ETM Architecture", 3.3.2 */
 #define ETMR_STATUS		(0x10)
-#define ETMST_OVERFLOW		(1 << 0)
-#define ETMST_PROGBIT		(1 << 1)
-#define ETMST_STARTSTOP		(1 << 2)
-#define ETMST_TRIGGER		(1 << 3)
+#define ETMST_OVERFLOW		BIT(0)
+#define ETMST_PROGBIT		BIT(1)
+#define ETMST_STARTSTOP		BIT(2)
+#define ETMST_TRIGGER		BIT(3)
 
 #define etm_progbit(t)		(etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
 #define etm_started(t)		(etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
@@ -123,7 +111,7 @@ struct tracectx {
 
 #define ETMR_TRACEENCTRL2	0x1c
 #define ETMR_TRACEENCTRL	0x24
-#define ETMTE_INCLEXCL		(1 << 24)
+#define ETMTE_INCLEXCL		BIT(24)
 #define ETMR_TRACEENEVT		0x20
 #define ETMCTRL_OPTS		(ETMCTRL_DO_CPRT | \
 				ETMCTRL_DATA_DO_ADDR | \
@@ -146,12 +134,12 @@ struct tracectx {
 #define ETBR_CTRL		0x20
 #define ETBR_FORMATTERCTRL	0x304
 #define ETBFF_ENFTC		1
-#define ETBFF_ENFCONT		(1 << 1)
-#define ETBFF_FONFLIN		(1 << 4)
-#define ETBFF_MANUAL_FLUSH	(1 << 6)
-#define ETBFF_TRIGIN		(1 << 8)
-#define ETBFF_TRIGEVT		(1 << 9)
-#define ETBFF_TRIGFL		(1 << 10)
+#define ETBFF_ENFCONT		BIT(1)
+#define ETBFF_FONFLIN		BIT(4)
+#define ETBFF_MANUAL_FLUSH	BIT(6)
+#define ETBFF_TRIGIN		BIT(8)
+#define ETBFF_TRIGEVT		BIT(9)
+#define ETBFF_TRIGFL		BIT(10)
 
 #define etb_writel(t, v, x) \
 	(__raw_writel((v), (t)->etb_regs + (x)))
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000000..4d8ae9d67abe
--- /dev/null
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -0,0 +1,133 @@
+#ifndef _ARM_HW_BREAKPOINT_H
+#define _ARM_HW_BREAKPOINT_H
+
+#ifdef __KERNEL__
+
+struct task_struct;
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+struct arch_hw_breakpoint_ctrl {
+	u32 __reserved	: 9,
+	mismatch	: 1,
+			: 9,
+	len		: 8,
+	type		: 2,
+	privilege	: 2,
+	enabled		: 1;
+};
+
+struct arch_hw_breakpoint {
+	u32	address;
+	u32	trigger;
+	struct	perf_event *suspended_wp;
+	struct	arch_hw_breakpoint_ctrl ctrl;
+};
+
+static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
+{
+	return (ctrl.mismatch << 22) | (ctrl.len << 5) | (ctrl.type << 3) |
+		(ctrl.privilege << 1) | ctrl.enabled;
+}
+
+static inline void decode_ctrl_reg(u32 reg,
+				   struct arch_hw_breakpoint_ctrl *ctrl)
+{
+	ctrl->enabled	= reg & 0x1;
+	reg >>= 1;
+	ctrl->privilege	= reg & 0x3;
+	reg >>= 2;
+	ctrl->type	= reg & 0x3;
+	reg >>= 2;
+	ctrl->len	= reg & 0xff;
+	reg >>= 17;
+	ctrl->mismatch	= reg & 0x1;
+}
+
+/* Debug architecture numbers. */
+#define ARM_DEBUG_ARCH_RESERVED	0	/* In case of ptrace ABI updates. */
+#define ARM_DEBUG_ARCH_V6	1
+#define ARM_DEBUG_ARCH_V6_1	2
+#define ARM_DEBUG_ARCH_V7_ECP14	3
+#define ARM_DEBUG_ARCH_V7_MM	4
+
+/* Breakpoint */
+#define ARM_BREAKPOINT_EXECUTE	0
+
+/* Watchpoints */
+#define ARM_BREAKPOINT_LOAD	1
+#define ARM_BREAKPOINT_STORE	2
+
+/* Privilege Levels */
+#define ARM_BREAKPOINT_PRIV	1
+#define ARM_BREAKPOINT_USER	2
+
+/* Lengths */
+#define ARM_BREAKPOINT_LEN_1	0x1
+#define ARM_BREAKPOINT_LEN_2	0x3
+#define ARM_BREAKPOINT_LEN_4	0xf
+#define ARM_BREAKPOINT_LEN_8	0xff
+
+/* Limits */
+#define ARM_MAX_BRP		16
+#define ARM_MAX_WRP		16
+#define ARM_MAX_HBP_SLOTS	(ARM_MAX_BRP + ARM_MAX_WRP)
+
+/* DSCR method of entry bits. */
+#define ARM_DSCR_MOE(x)			((x >> 2) & 0xf)
+#define ARM_ENTRY_BREAKPOINT		0x1
+#define ARM_ENTRY_ASYNC_WATCHPOINT	0x2
+#define ARM_ENTRY_SYNC_WATCHPOINT	0xa
+
+/* DSCR monitor/halting bits. */
+#define ARM_DSCR_HDBGEN		(1 << 14)
+#define ARM_DSCR_MDBGEN		(1 << 15)
+
+/* opcode2 numbers for the co-processor instructions. */
+#define ARM_OP2_BVR		4
+#define ARM_OP2_BCR		5
+#define ARM_OP2_WVR		6
+#define ARM_OP2_WCR		7
+
+/* Base register numbers for the debug registers. */
+#define ARM_BASE_BVR		64
+#define ARM_BASE_BCR		80
+#define ARM_BASE_WVR		96
+#define ARM_BASE_WCR		112
+
+/* Accessor macros for the debug registers. */
+#define ARM_DBG_READ(M, OP2, VAL) do {\
+	asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\
+} while (0)
+
+#define ARM_DBG_WRITE(M, OP2, VAL) do {\
+	asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\
+} while (0)
+
+struct notifier_block;
+struct perf_event;
+struct pmu;
+
+extern struct pmu perf_ops_bp;
+extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+				  int *gen_len, int *gen_type);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					   unsigned long val, void *data);
+
+extern u8 arch_get_debug_arch(void);
+extern u8 arch_get_max_wp_len(void);
+extern void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+int hw_breakpoint_slots(int type);
+
+#else
+static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk) {}
+
+#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
+#endif	/* __KERNEL__ */
+#endif	/* _ARM_HW_BREAKPOINT_H */
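
The control-register layout defined above can be exercised on its own. The round trip below is a standalone user-space harness, not kernel code: it packs a 4-byte user-mode store watchpoint with encode_ctrl_reg() and checks that decode_ctrl_reg() recovers the same fields.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct arch_hw_breakpoint_ctrl {
	u32 __reserved	: 9,
	mismatch	: 1,
			: 9,
	len		: 8,
	type		: 2,
	privilege	: 2,
	enabled		: 1;
};

static u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
{
	return (ctrl.mismatch << 22) | (ctrl.len << 5) | (ctrl.type << 3) |
		(ctrl.privilege << 1) | ctrl.enabled;
}

static void decode_ctrl_reg(u32 reg, struct arch_hw_breakpoint_ctrl *ctrl)
{
	ctrl->enabled = reg & 0x1;
	reg >>= 1;
	ctrl->privilege = reg & 0x3;
	reg >>= 2;
	ctrl->type = reg & 0x3;
	reg >>= 2;
	ctrl->len = reg & 0xff;
	reg >>= 17;
	ctrl->mismatch = reg & 0x1;
}

int main(void)
{
	struct arch_hw_breakpoint_ctrl wp = {
		.len		= 0xf,	/* ARM_BREAKPOINT_LEN_4 */
		.type		= 2,	/* ARM_BREAKPOINT_STORE */
		.privilege	= 2,	/* ARM_BREAKPOINT_USER */
		.enabled	= 1,
	};
	struct arch_hw_breakpoint_ctrl out;
	u32 reg = encode_ctrl_reg(wp);

	decode_ctrl_reg(reg, &out);
	assert(out.len == wp.len && out.type == wp.type &&
	       out.privilege == wp.privilege && out.enabled == wp.enabled);
	printf("encoded control register: %#x\n", reg);	/* 0x1f5 */
	return 0;
}
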
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index e4dfa69abb68..cbb0bc295d2b 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -7,20 +7,27 @@
 
 struct unwind_table;
 
-struct mod_arch_specific
-{
 #ifdef CONFIG_ARM_UNWIND
-	Elf_Shdr *unw_sec_init;
-	Elf_Shdr *unw_sec_devinit;
-	Elf_Shdr *unw_sec_core;
-	Elf_Shdr *sec_init_text;
-	Elf_Shdr *sec_devinit_text;
-	Elf_Shdr *sec_core_text;
-	struct unwind_table *unwind_init;
-	struct unwind_table *unwind_devinit;
-	struct unwind_table *unwind_core;
-#endif
+struct arm_unwind_mapping {
+	Elf_Shdr *unw_sec;
+	Elf_Shdr *sec_text;
+	struct unwind_table *unwind;
+};
+enum {
+	ARM_SEC_INIT,
+	ARM_SEC_DEVINIT,
+	ARM_SEC_CORE,
+	ARM_SEC_EXIT,
+	ARM_SEC_DEVEXIT,
+	ARM_SEC_MAX,
+};
+struct mod_arch_specific {
+	struct arm_unwind_mapping map[ARM_SEC_MAX];
 };
+#else
+struct mod_arch_specific {
+};
+#endif
 
 /*
  * Include the ARM architecture version.
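
Folding the unwind bookkeeping into an array indexed by the ARM_SEC_* constants turns per-section handling into a loop instead of three sets of hand-written fields. The sketch below is hypothetical teardown code, assuming the unwind_table_del() helper from the ARM unwind support; the real logic lives in the module and unwind code, not in this header.

static void hypothetical_release_module_unwind(struct module *mod)
{
	int i;

	for (i = 0; i < ARM_SEC_MAX; i++) {
		struct arm_unwind_mapping *map = &mod->arch.map[i];

		if (map->unwind)
			unwind_table_del(map->unwind);
	}
}
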
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 48837e6d8887..b5799a3b7117 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -17,7 +17,7 @@
  * counter interrupts are regular interrupts and not an NMI. This
  * means that when we receive the interrupt we can call
  * perf_event_do_pending() that handles all of the work with
- * interrupts enabled.
+ * interrupts disabled.
  */
 static inline void
 set_perf_event_pending(void)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ab68cf1ef80f..a9672e8406a3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -278,9 +278,24 @@ extern struct page *empty_zero_page;
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 
-#define set_pte_at(mm,addr,ptep,pteval) do { \
-	set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
- } while (0)
+#if __LINUX_ARM_ARCH__ < 6
+static inline void __sync_icache_dcache(pte_t pteval)
+{
+}
+#else
+extern void __sync_icache_dcache(pte_t pteval);
+#endif
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	if (addr >= TASK_SIZE)
+		set_pte_ext(ptep, pteval, 0);
+	else {
+		__sync_icache_dcache(pteval);
+		set_pte_ext(ptep, pteval, PTE_EXT_NG);
+	}
+}
 
 /*
  * The following only work if pte_present() is true.
@@ -290,8 +305,13 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
 #define pte_special(pte)	(0)
 
+#define pte_present_user(pte) \
+	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+	 (L_PTE_PRESENT | L_PTE_USER))
+
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
 
@@ -317,6 +337,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
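
set_pte_at() now routes user PTEs through __sync_icache_dcache(), which relies on the new pte_exec() and pte_present_user() tests. The sketch below is a deliberately simplified, hypothetical version of that idea; the real ARMv6+ implementation is out of line in arch/arm/mm and also tracks the PG_dcache_clean page flag.

static void hypothetical_sync_icache_dcache(pte_t pteval, struct page *page)
{
	if (!pte_present_user(pteval))
		return;				/* kernel-only or not present */

	if (pte_exec(pteval)) {
		/* make the written data visible, then drop stale I-cache lines */
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
		__flush_icache_all();
	}
}
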
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 7bed3daf83b8..67357baaeeeb 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -19,6 +19,7 @@
 
 #ifdef __KERNEL__
 
+#include <asm/hw_breakpoint.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
 
@@ -41,6 +42,9 @@ struct debug_entry {
 struct debug_info {
 	int			nsaved;
 	struct debug_entry	bp[2];
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
+#endif
 };
 
 struct thread_struct {
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 7ce15eb15f72..783d50f32618 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -29,6 +29,8 @@
 #define PTRACE_SETCRUNCHREGS	26
 #define PTRACE_GETVFPREGS	27
 #define PTRACE_SETVFPREGS	28
+#define PTRACE_GETHBPREGS	29
+#define PTRACE_SETHBPREGS	30
 
 /*
  * PSR bits
diff --git a/arch/arm/include/asm/smp_mpidr.h b/arch/arm/include/asm/smp_mpidr.h
new file mode 100644
index 000000000000..6a9307d64900
--- /dev/null
+++ b/arch/arm/include/asm/smp_mpidr.h
@@ -0,0 +1,17 @@
+#ifndef ASMARM_SMP_MIDR_H
+#define ASMARM_SMP_MIDR_H
+
+#define hard_smp_processor_id()						\
+	({								\
+		unsigned int cpunum;					\
+		__asm__("\n"						\
+			"1:	mrc p15, 0, %0, c0, c0, 5\n"		\
+			"	.pushsection \".alt.smp.init\", \"a\"\n"\
+			"	.long	1b\n"				\
+			"	mov	%0, #0\n"			\
+			"	.popsection"				\
+			: "=r" (cpunum));				\
+		cpunum &= 0x0F;						\
+	})
+
+#endif
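
hard_smp_processor_id() reads the MPIDR CPU identifier; the .alt.smp.init record it emits lets an SMP_ON_UP kernel rewrite the mrc to "mov %0, #0" at boot, so the macro simply yields 0 on a uniprocessor. A hypothetical caller, shown only to illustrate the statement-expression result:

static unsigned int hypothetical_report_boot_cpu(void)
{
	unsigned int cpu = hard_smp_processor_id();

	printk(KERN_INFO "booting on physical CPU %u\n", cpu);
	return cpu;
}
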
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index e6215305544a..f24c1b9e211d 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -7,15 +7,40 @@
 
 #include <asm/cputype.h>
 
+/*
+ * Return true if we are running on a SMP platform
+ */
+static inline bool is_smp(void)
+{
+#ifndef CONFIG_SMP
+	return false;
+#elif defined(CONFIG_SMP_ON_UP)
+	extern unsigned int smp_on_up;
+	return !!smp_on_up;
+#else
+	return true;
+#endif
+}
+
 /* all SMP configurations have the extended CPUID registers */
 static inline int tlb_ops_need_broadcast(void)
 {
+	if (!is_smp())
+		return 0;
+
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
 }
 
+#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
+#define cache_ops_need_broadcast()	0
+#else
 static inline int cache_ops_need_broadcast(void)
 {
+	if (!is_smp())
+		return 0;
+
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
 }
+#endif
 
 #endif
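
is_smp() lets a single kernel image answer the SMP question at run time (through the smp_on_up variable patched during boot), and cache_ops_need_broadcast() now collapses to a constant 0 whenever broadcasting can never be required. A hypothetical call-site sketch; the helper names are invented here.

static void hypothetical_ipi_icache_flush(void *unused)
{
	__flush_icache_all();
}

static void hypothetical_flush_icache_everywhere(void)
{
	/* Only older ARMv6 SMP cores lack hardware-broadcast cache ops. */
	if (cache_ops_need_broadcast())
		smp_call_function(hypothetical_ipi_icache_flush, NULL, 1);

	__flush_icache_all();
}
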
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 8ba1ccf82a02..1120f18a6b17 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -85,6 +85,10 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
 				     struct pt_regs *),
 		     int sig, int code, const char *name);
 
+void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
+				       struct pt_regs *),
+		      int sig, int code, const char *name);
+
 #define xchg(ptr,x) \
 	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
@@ -325,6 +329,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 extern void disable_hlt(void);
 extern void enable_hlt(void);
 
+void cpu_idle_wait(void);
+
 #include <asm-generic/cmpxchg-local.h>
 
 #if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 33b546ae72d4..ce7378ea15a2 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -70,6 +70,10 @@
 #undef _TLB
 #undef MULTI_TLB
 
+#ifdef CONFIG_SMP_ON_UP
+#define MULTI_TLB 1
+#endif
+
 #define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)
 
 #ifdef CONFIG_CPU_TLB_V3
@@ -185,17 +189,23 @@
 # define v6wbi_always_flags	(-1UL)
 #endif
 
-#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags		(TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
+#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
 			 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
-#else
-#define v7wbi_tlb_flags		(TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
 			 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
-#endif
 
 #ifdef CONFIG_CPU_TLB_V7
-# define v7wbi_possible_flags	v7wbi_tlb_flags
-# define v7wbi_always_flags	v7wbi_tlb_flags
+
+# ifdef CONFIG_SMP_ON_UP
+# define v7wbi_possible_flags	(v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
+# define v7wbi_always_flags	(v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
+# elif defined(CONFIG_SMP)
+# define v7wbi_possible_flags	v7wbi_tlb_flags_smp
+# define v7wbi_always_flags	v7wbi_tlb_flags_smp
+# else
+# define v7wbi_possible_flags	v7wbi_tlb_flags_up
+# define v7wbi_always_flags	v7wbi_tlb_flags_up
+# endif
 # ifdef _TLB
 # define MULTI_TLB 1
 # else
@@ -560,12 +570,20 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #endif
 
 /*
- * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * If PG_dcache_clean is not set for the page, we need to ensure that any
  * cache entries for the kernels virtual memory range are written
- * back to the page.
+ * back to the page. On ARMv6 and later, the cache coherency is handled via
+ * the set_pte_at() function.
  */
+#if __LINUX_ARM_ARCH__ < 6
 extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 	pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep)
+{
+}
+#endif
 
 #endif
 
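
Splitting the ARMv7 TLB flags into _smp and _up variants lets an SMP_ON_UP kernel treat their union as "possible" and their intersection as "always guaranteed". The helper below and its runtime_tlb_flags parameter are illustrative only, not the header's actual tlb_flag machinery, but they show how such a split is typically consumed.

static inline int hypothetical_tlb_flag(unsigned long runtime_tlb_flags,
					unsigned long f)
{
	/* "always" bits fold to a compile-time 1; "possible" bits need the
	   flags of the CPU actually detected at run time. */
	return (v7wbi_always_flags & f) ||
	       (runtime_tlb_flags & v7wbi_possible_flags & f);
}
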
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index d02cfb683487..c891eb76c0e3 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -393,6 +393,9 @@
 #define __NR_perf_event_open		(__NR_SYSCALL_BASE+364)
 #define __NR_recvmmsg			(__NR_SYSCALL_BASE+365)
 #define __NR_accept4			(__NR_SYSCALL_BASE+366)
+#define __NR_fanotify_init		(__NR_SYSCALL_BASE+367)
+#define __NR_fanotify_mark		(__NR_SYSCALL_BASE+368)
+#define __NR_prlimit64			(__NR_SYSCALL_BASE+369)
 
 /*
  * The following SWIs are ARM private.