Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild        |  1
-rw-r--r--  arch/arm/include/asm/bL_switcher.h | 77
-rw-r--r--  arch/arm/include/asm/hardirq.h     |  2
-rw-r--r--  arch/arm/include/asm/mach/arch.h   |  1
-rw-r--r--  arch/arm/include/asm/mcpm.h        |  8
-rw-r--r--  arch/arm/include/asm/memory.h      | 75
-rw-r--r--  arch/arm/include/asm/smp.h         |  2
7 files changed, 156 insertions(+), 10 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db39860b9c..6577b8aeb711 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -24,6 +24,7 @@ generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
 generic-y += siginfo.h
+generic-y += simd.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 000000000000..1714800fa113
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by:  Nicolas Pitre, April 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+			 bL_switch_completion_handler completer,
+			 void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+	return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE	0
+#define BL_NOTIFY_POST_ENABLE	1
+#define BL_NOTIFY_PRE_DISABLE	2
+#define BL_NOTIFY_POST_DISABLE	3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled.  Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled().  These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
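
A minimal, hypothetical usage sketch for the API above (not part of this patch): a driver registers for switcher state notifications and uses the get/put pair to hold the switcher state steady across a critical section. All my_-prefixed names are invented for illustration.

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/bL_switcher.h>

static int my_bl_notify(struct notifier_block *nb,
			unsigned long action, void *data)
{
	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		/* Quiesce anything a state change could race with.
		 * Per the header comment, bL_switcher_{get,put}_enabled()
		 * must not be called from this callback. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_bl_nb = {
	.notifier_call = my_bl_notify,
};

static int __init my_init(void)
{
	bL_switcher_register_notifier(&my_bl_nb);

	if (bL_switcher_get_enabled()) {
		/* the switcher cannot be disabled until the matching put */
		bL_switcher_put_enabled();
	}
	return 0;
}
module_init(my_init);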
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df63..3d7351c844aa 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI	6
+#define NR_IPI	7
 
 typedef struct {
 	unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 402a2bc6aa68..17a3fa2979e8 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -49,6 +49,7 @@ struct machine_desc {
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **,
 					 struct meminfo *);
+	void			(*init_meminfo)(void);
 	void			(*reserve)(void);/* reserve mem blocks */
 	void			(*map_io)(void);/* IO mapping function */
 	void			(*init_early)(void);
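
The new init_meminfo hook gives a platform a chance to adjust its view of memory very early, before the memory banks are finalized. A hypothetical board file might wire it up as below; the board name and function are invented, and the comment states my assumption about the hook's intent.

#include <linux/init.h>
#include <asm/mach/arch.h>

static void __init myboard_init_meminfo(void)
{
	/* assumption: this is the place for very early memory fixups,
	 * e.g. re-pointing the kernel at a different physical alias of
	 * RAM before paging is set up */
}

DT_MACHINE_START(MYBOARD, "Hypothetical board")
	.init_meminfo	= myboard_init_meminfo,
MACHINE_END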
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 0f7b7620e9a5..7626a7fd4938 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -42,6 +42,14 @@ extern void mcpm_entry_point(void);
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
 
 /*
+ * This sets an early poke i.e a value to be poked into some address
+ * from very early assembly code before the CPU is ungated.  The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+		unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
  * CPU/cluster power operations API for higher subsystems to use.
  */
 
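
A hedged sketch of how a platform might pair the new early poke with the existing entry vector when bringing a CPU up through MCPM. MYSOC_RELEASE_PHYS and mysoc_boot_cpu are invented for illustration; secondary_startup is the standard ARM secondary entry point.

#include <asm/mcpm.h>

#define MYSOC_RELEASE_PHYS	0x10000020UL	/* invented register address */

extern void secondary_startup(void);

static void mysoc_boot_cpu(unsigned int cpu, unsigned int cluster)
{
	/* where the CPU jumps once it leaves the early assembly path */
	mcpm_set_entry_vector(cpu, cluster, secondary_startup);

	/* written to the physical address by very early assembly before
	 * the CPU is ungated; a 0 address would mean "no poke" */
	mcpm_set_early_poke(cpu, cluster, MYSOC_RELEASE_PHYS, 1);
}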
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e750a938fd3c..6748d6295a1a 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -172,8 +172,14 @@
  * so that all we need to do is modify the 8-bit constant field.
  */
 #define __PV_BITS_31_24	0x81000000
+#define __PV_BITS_7_0	0x81
+
+extern phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
+extern u64 __pv_phys_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
 
-extern unsigned long __pv_phys_offset;
 #define PHYS_OFFSET __pv_phys_offset
 
 #define __pv_stub(from,to,instr,type)	\
@@ -185,22 +191,58 @@ extern unsigned long __pv_phys_offset;
 	: "=r" (to)					\
 	: "r" (from), "I" (type))
 
-static inline unsigned long __virt_to_phys(unsigned long x)
+#define __pv_stub_mov_hi(t)				\
+	__asm__ volatile("@ __pv_stub_mov\n"		\
+	"1:	mov	%R0, %1\n"			\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "=r" (t)					\
+	: "I" (__PV_BITS_7_0))
+
+#define __pv_add_carry_stub(x, y)			\
+	__asm__ volatile("@ __pv_add_carry_stub\n"	\
+	"1:	adds	%Q0, %1, %2\n"			\
+	"	adc	%R0, %R0, #0\n"			\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "+r" (y)					\
+	: "r" (x), "I" (__PV_BITS_31_24)		\
+	: "cc")
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
-	unsigned long t;
-	__pv_stub(x, t, "add", __PV_BITS_31_24);
+	phys_addr_t t;
+
+	if (sizeof(phys_addr_t) == 4) {
+		__pv_stub(x, t, "add", __PV_BITS_31_24);
+	} else {
+		__pv_stub_mov_hi(t);
+		__pv_add_carry_stub(x, t);
+	}
 	return t;
 }
 
-static inline unsigned long __phys_to_virt(unsigned long x)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
 {
 	unsigned long t;
 	__pv_stub(x, t, "sub", __PV_BITS_31_24);
 	return t;
 }
+
 #else
-#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+	return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
 #endif
 #endif
 #endif	/* __ASSEMBLY__ */
@@ -238,17 +280,32 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
 
 static inline void *phys_to_virt(phys_addr_t x)
 {
-	return (void *)(__phys_to_virt((unsigned long)(x)));
+	return (void *)__phys_to_virt(x);
 }
 
 /*
  * Drivers should NOT use these either.
  */
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
-#define __va(x)			((void *)__phys_to_virt((unsigned long)(x)))
+#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
 /*
+ * These are for systems that have a hardware interconnect supported alias of
+ * physical memory for idmap purposes.  Most cases should leave these
+ * untouched.
+ */
+static inline phys_addr_t __virt_to_idmap(unsigned long x)
+{
+	if (arch_virt_to_idmap)
+		return arch_virt_to_idmap(x);
+	else
+		return __virt_to_phys(x);
+}
+
+#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))
+
+/*
  * Virtual <-> DMA view memory address translations
  * Again, these are *only* valid on the kernel direct mapped RAM
  * memory.  Use of these is *deprecated* (and that doesn't mean
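
For reference, a standalone C illustration (mine, not code from this patch) of what the patched LPAE stub sequence computes: __pv_offset holds the 64-bit PHYS_OFFSET - PAGE_OFFSET delta, the patched mov supplies its high word, and the adds/adc pair adds the low word while propagating the carry into the high word.

#include <stdint.h>
#include <stdio.h>

static uint64_t patched_virt_to_phys(uint32_t va, uint64_t pv_offset)
{
	uint32_t lo = va + (uint32_t)pv_offset;		/* adds %Q0, %1, %2 */
	uint32_t hi = (uint32_t)(pv_offset >> 32);	/* mov %R0, #imm (patched) */

	hi += (lo < va);				/* adc %R0, %R0, #0 */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* example: RAM aliased at 0x8_0000_0000, kernel VA base 0xc0000000 */
	uint64_t pv_offset = 0x800000000ULL - 0xc0000000UL;

	/* prints 800100000: the VA 1 MiB above PAGE_OFFSET lands 1 MiB
	 * above the 34-bit physical base */
	printf("%llx\n",
	       (unsigned long long)patched_virt_to_phys(0xc0100000u, pv_offset));
	return 0;
}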
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a8cae71caceb..22a3b9b5d4a1 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -84,6 +84,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
 
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
 struct smp_operations {
 #ifdef CONFIG_SMP
 	/*
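
A hedged sketch of the intended use of the new hook, mirroring (as I understand it) the big.LITTLE switcher's handshake with an inbound CPU: one CPU parks a completion, and a partner CPU later completes it by raising an IPI. The meaning of the return value is an assumption, flagged in the comments.

#include <linux/completion.h>
#include <linux/smp.h>
#include <asm/smp.h>

static void wait_for_partner_cpu(void)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ipi_nr;

	/* park a completion on this CPU; assumption: the return value
	 * identifies the IPI the partner must raise to complete it */
	ipi_nr = register_ipi_completion(&done, smp_processor_id());

	/* ... communicate ipi_nr to the partner CPU, e.g. through the
	 * interrupt controller's software-generated interrupt API ... */
	(void)ipi_nr;

	wait_for_completion(&done);	/* complete()d from the IPI handler */
}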