Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/atomic.h          24
-rw-r--r--  arch/arm/include/asm/cacheflush.h      75
-rw-r--r--  arch/arm/include/asm/cp15.h            16
-rw-r--r--  arch/arm/include/asm/cputype.h         61
-rw-r--r--  arch/arm/include/asm/firmware.h        66
-rw-r--r--  arch/arm/include/asm/glue-cache.h       8
-rw-r--r--  arch/arm/include/asm/glue-df.h         20
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h  2
-rw-r--r--  arch/arm/include/asm/idmap.h            1
-rw-r--r--  arch/arm/include/asm/irq.h              5
-rw-r--r--  arch/arm/include/asm/kvm_arm.h          4
-rw-r--r--  arch/arm/include/asm/kvm_asm.h          2
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h    107
-rw-r--r--  arch/arm/include/asm/kvm_host.h        57
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h         87
-rw-r--r--  arch/arm/include/asm/kvm_vgic.h         1
-rw-r--r--  arch/arm/include/asm/mach/irq.h        36
-rw-r--r--  arch/arm/include/asm/mach/pci.h        11
-rw-r--r--  arch/arm/include/asm/mcpm.h           209
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h   2
-rw-r--r--  arch/arm/include/asm/pgtable.h          9
-rw-r--r--  arch/arm/include/asm/system_misc.h      3
-rw-r--r--  arch/arm/include/asm/thread_info.h      1
-rw-r--r--  arch/arm/include/asm/tlbflush.h        13
-rw-r--r--  arch/arm/include/asm/unistd.h           8
-rw-r--r--  arch/arm/include/debug/bcm2835.S       22
-rw-r--r--  arch/arm/include/debug/cns3xxx.S       19
-rw-r--r--  arch/arm/include/debug/exynos.S        39
-rw-r--r--  arch/arm/include/debug/mvebu.S          2
-rw-r--r--  arch/arm/include/debug/mxs.S           27
-rw-r--r--  arch/arm/include/debug/nomadik.S       20
-rw-r--r--  arch/arm/include/debug/pxa.S           33
-rw-r--r--  arch/arm/include/debug/samsung.S       87
-rw-r--r--  arch/arm/include/debug/sirf.S          42
-rw-r--r--  arch/arm/include/debug/uncompress.h     7
-rw-r--r--  arch/arm/include/debug/ux500.S         48
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h        12
37 files changed, 1064 insertions, 122 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index c79f61faa3a5..da1c77d39327 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,6 +243,29 @@ typedef struct {
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
+#ifdef CONFIG_ARM_LPAE
+static inline u64 atomic64_read(const atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter), "Qo" (v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	__asm__ __volatile__("@ atomic64_set\n"
+"	strd	%2, %H2, [%1]"
+	: "=Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	);
+}
+#else
 static inline u64 atomic64_read(const atomic64_t *v)
 {
 	u64 result;
@@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
+#endif
 
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
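
On LPAE hardware the LDRD/STRD used above are single-copy atomic for 64-bit aligned locations, which is why the exclusive-monitor loop can be dropped for plain reads and writes. A minimal usage sketch (the counter and helpers here are illustrative, not part of the patch):

	static atomic64_t bytes_rx = ATOMIC64_INIT(0);

	static void account_rx(u64 len)
	{
		atomic64_add(len, &bytes_rx);		/* still LL/SC based */
	}

	static u64 snapshot_rx(void)
	{
		return atomic64_read(&bytes_rx);	/* one LDRD under LPAE */
	}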
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd12..bff71388e72a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 	flush_cache_all();
 }
 
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path. It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
+ * operation is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located in
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area but we use it anyway for
+ * code intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+	__cpuc_clean_dcache_area(_p, size);
+	outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU. We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+	if (outer_cache.flush_range) {
+		/*
+		 * Ensure dirty data migrated from other CPUs into our cache
+		 * are cleaned out safely before the outer cache is cleaned:
+		 */
+		__cpuc_clean_dcache_area(_p, size);
+
+		/* Clean and invalidate stale data for *p from outer ... */
+		outer_flush_range(__pa(_p), __pa(_p + size));
+	}
+#endif
+
+	/* ... and inner cache: */
+	__cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
 #endif
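
A hedged sketch of the intended usage pattern, one writer publishing with the cache on and one reader that may run with the cache off (the variable and function names are illustrative); note the alignment keeps the flag on its own writeback granule, per the comment block above:

	static int power_state __aligned(__CACHE_WRITEBACK_GRANULE);

	static void publish_state(int s)
	{
		power_state = s;
		sync_cache_w(&power_state);	/* clean to main memory */
	}

	static int observe_state(void)
	{
		sync_cache_r(&power_state);	/* flush possibly stale lines */
		return power_state;
	}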
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a60..1f3262e99d81 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -42,6 +42,8 @@
 #define vectors_high()	(0)
 #endif
 
+#ifdef CONFIG_CPU_CP15
+
 extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
@@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val)
 	isb();
 }
 
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
+ * minds of the developers). Yielding 0 for machines without a cp15 (and making
+ * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_no_alignment	UL(0)
+#define cr_alignment	UL(0)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ad41ec2471e8..7652712d1d14 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -38,6 +38,24 @@
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
 	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
 
+#define ARM_CPU_IMP_ARM			0x41
+#define ARM_CPU_IMP_INTEL		0x69
+
+#define ARM_CPU_PART_ARM1136		0xB360
+#define ARM_CPU_PART_ARM1156		0xB560
+#define ARM_CPU_PART_ARM1176		0xB760
+#define ARM_CPU_PART_ARM11MPCORE	0xB020
+#define ARM_CPU_PART_CORTEX_A8		0xC080
+#define ARM_CPU_PART_CORTEX_A9		0xC090
+#define ARM_CPU_PART_CORTEX_A5		0xC050
+#define ARM_CPU_PART_CORTEX_A15		0xC0F0
+#define ARM_CPU_PART_CORTEX_A7		0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
+#define ARM_CPU_XSCALE_ARCH_V1		0x2000
+#define ARM_CPU_XSCALE_ARCH_V2		0x4000
+#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
@@ -50,6 +68,7 @@ extern unsigned int processor_id;
 		    : "cc");						\
 		__val;							\
 	})
+
 #define read_cpuid_ext(ext_reg)						\
 	({								\
 		unsigned int __val;					\
@@ -59,29 +78,24 @@ extern unsigned int processor_id;
 		    : "cc");						\
 		__val;							\
 	})
-#else
-#define read_cpuid(reg) (processor_id)
-#define read_cpuid_ext(reg) 0
-#endif
 
-#define ARM_CPU_IMP_ARM			0x41
-#define ARM_CPU_IMP_INTEL		0x69
+#else /* ifdef CONFIG_CPU_CP15 */
 
-#define ARM_CPU_PART_ARM1136		0xB360
-#define ARM_CPU_PART_ARM1156		0xB560
-#define ARM_CPU_PART_ARM1176		0xB760
-#define ARM_CPU_PART_ARM11MPCORE	0xB020
-#define ARM_CPU_PART_CORTEX_A8		0xC080
-#define ARM_CPU_PART_CORTEX_A9		0xC090
-#define ARM_CPU_PART_CORTEX_A5		0xC050
-#define ARM_CPU_PART_CORTEX_A15		0xC0F0
-#define ARM_CPU_PART_CORTEX_A7		0xC070
+/*
+ * read_cpuid and read_cpuid_ext should only ever be called on machines that
+ * have cp15 so warn on other usages.
+ */
+#define read_cpuid(reg)							\
+	({								\
+		WARN_ON_ONCE(1);					\
+		0;							\
+	})
 
-#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
-#define ARM_CPU_XSCALE_ARCH_V1		0x2000
-#define ARM_CPU_XSCALE_ARCH_V2		0x4000
-#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+#define read_cpuid_ext(reg) read_cpuid(reg)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
 
+#ifdef CONFIG_CPU_CP15
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant. Use this function to read the CPU ID
@@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
 	return read_cpuid(CPUID_ID);
 }
 
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 {
 	return (read_cpuid_id() & 0xFF000000) >> 24;
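
For illustration, the decode helpers and the new part-number constants compose like this (a hypothetical check; the 0xfff0 mask matches MIDR[15:4], the same field the ARM_CPU_PART_* values above encode):

	static bool cpu_is_cortex_a15(void)
	{
		return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
		       (read_cpuid_id() & 0xfff0) == ARM_CPU_PART_CORTEX_A15;
	}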
diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h
new file mode 100644
index 000000000000..15631300c238
--- /dev/null
+++ b/arch/arm/include/asm/firmware.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics.
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_FIRMWARE_H
+#define __ASM_ARM_FIRMWARE_H
+
+#include <linux/bug.h>
+
+/*
+ * struct firmware_ops
+ *
+ * A structure to specify available firmware operations.
+ *
+ * A filled up structure can be registered with register_firmware_ops().
+ */
+struct firmware_ops {
+	/*
+	 * Enters CPU idle mode
+	 */
+	int (*do_idle)(void);
+	/*
+	 * Sets boot address of specified physical CPU
+	 */
+	int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr);
+	/*
+	 * Boots specified physical CPU
+	 */
+	int (*cpu_boot)(int cpu);
+	/*
+	 * Initializes L2 cache
+	 */
+	int (*l2x0_init)(void);
+};
+
+/* Global pointer for current firmware_ops structure, can't be NULL. */
+extern const struct firmware_ops *firmware_ops;
+
+/*
+ * call_firmware_op(op, ...)
+ *
+ * Checks if firmware operation is present and calls it,
+ * otherwise returns -ENOSYS
+ */
+#define call_firmware_op(op, ...)					\
+	((firmware_ops->op) ? firmware_ops->op(__VA_ARGS__) : (-ENOSYS))
+
+/*
+ * register_firmware_ops(ops)
+ *
+ * A function to register platform firmware_ops struct.
+ */
+static inline void register_firmware_ops(const struct firmware_ops *ops)
+{
+	BUG_ON(!ops);
+
+	firmware_ops = ops;
+}
+
+#endif
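
A sketch of the intended registration pattern, with a hypothetical my_soc_smc() secure-monitor helper and constant standing in for a real platform's firmware interface:

	static int my_soc_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
	{
		return my_soc_smc(SMC_CMD_SET_BOOT_ADDR, cpu, boot_addr);
	}

	static const struct firmware_ops my_soc_firmware_ops = {
		.set_cpu_boot_addr = my_soc_set_cpu_boot_addr,
	};

	static void __init my_soc_firmware_init(void)
	{
		register_firmware_ops(&my_soc_firmware_ops);
	}

Generic code would then use call_firmware_op(set_cpu_boot_addr, cpu, addr) and fall back to its non-firmware path when that returns -ENOSYS.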
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index cca9f15704ed..ea289e1435e7 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -19,14 +19,6 @@
 #undef _CACHE
 #undef MULTI_CACHE
 
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v3
-# endif
-#endif
-
 #if defined(CONFIG_CPU_CACHE_V4)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 8cacbcda76da..b6e9f2c108b5 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -18,12 +18,12 @@
  *	================
  *
  *	We have the following to choose from:
- *	  arm6		- ARM6 style
  *	  arm7		- ARM7 style
  *	  v4_early	- ARMv4 without Thumb early abort handler
  *	  v4t_late	- ARMv4 with Thumb late abort handler
  *	  v4t_early	- ARMv4 with Thumb early abort handler
- *	  v5tej_early	- ARMv5 with Thumb and Java early abort handler
+ *	  v5t_early	- ARMv5 with Thumb early abort handler
+ *	  v5tj_early	- ARMv5 with Thumb and Java early abort handler
  *	  xscale	- ARMv5 with Thumb with Xscale extensions
  *	  v6_early	- ARMv6 generic early abort handler
  *	  v7_early	- ARMv7 generic early abort handler
@@ -39,19 +39,19 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_LV4T
+#ifdef CONFIG_CPU_ABRT_EV4
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4t_late_abort
+#  define CPU_DABORT_HANDLER v4_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV4
+#ifdef CONFIG_CPU_ABRT_LV4T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4_early_abort
+#  define CPU_DABORT_HANDLER v4t_late_abort
 # endif
 #endif
 
@@ -63,19 +63,19 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5TJ
+#ifdef CONFIG_CPU_ABRT_EV5T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5tj_early_abort
+#  define CPU_DABORT_HANDLER v5t_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5T
+#ifdef CONFIG_CPU_ABRT_EV5TJ
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5t_early_abort
+#  define CPU_DABORT_HANDLER v5tj_early_abort
 # endif
 #endif
 
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 02fe2fbe2477..ed94b1a366ae 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
  * IOP3XX processor registers
  */
 #define IOP3XX_PERIPHERAL_PHYS_BASE	0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfedfe000
 #define IOP3XX_PERIPHERAL_SIZE		0x00002000
 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
 					IOP3XX_PERIPHERAL_SIZE - 1)
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index 1a66f907e5cc..bf863edb517d 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -8,7 +8,6 @@
 #define __idmap __section(.idmap.text) noinline notrace
 
 extern pgd_t *idmap_pgd;
-extern pgd_t *hyp_pgd;
 
 void setup_mm_for_reboot(void);
 
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 35c21c375d81..53c15dec7af6 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -30,6 +30,11 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
 void init_IRQ(void);
 
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
+#endif
+
 #endif
 
 #endif
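
With CONFIG_MULTI_IRQ_HANDLER, an irqchip driver registers its top-level dispatcher once at init time. A hedged sketch (the my_gic_* helpers are hypothetical):

	static void __exception_irq_entry my_gic_handle_irq(struct pt_regs *regs)
	{
		unsigned int irqnr = my_gic_read_ack();	/* hypothetical ack read */

		handle_IRQ(irqnr, regs);
	}

	static void __init my_gic_init(void)
	{
		set_handle_irq(my_gic_handle_irq);
	}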
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 7c3d813e15df..124623e5ef14 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@
 
 #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
 
+#define HSR_DABT_S1PTW		(1U << 7)
+#define HSR_DABT_CM		(1U << 8)
+#define HSR_DABT_EA		(1U << 9)
+
 #endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e1..18d50322a9e2 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 #endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb5..82b4babead2c 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
 
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+	return &vcpu->arch.regs.usr_regs.ARM_pc;
 }
 
-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
 	return reg == 15;
 }
 
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+	case 0:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	default:
+		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+		return -EFAULT;
+	}
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
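
Assuming the accessors behave as above, an MMIO decode path might combine them like this (sketch only; struct dabt_decode is illustrative, not the in-tree type):

	struct dabt_decode {
		phys_addr_t	ipa;
		int		len;
		bool		is_write;
		int		rd;
	};

	static int decode_dabt(struct kvm_vcpu *vcpu, struct dabt_decode *d)
	{
		if (!kvm_vcpu_dabt_isvalid(vcpu))
			return -EINVAL;			/* ISS has no decode info */

		d->len = kvm_vcpu_dabt_get_as(vcpu);	/* 1, 2 or 4 bytes */
		if (d->len < 0)
			return d->len;
		d->ipa = kvm_vcpu_get_fault_ipa(vcpu) |
			 (kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK);
		d->is_write = kvm_vcpu_dabt_iswrite(vcpu);
		d->rd = kvm_vcpu_dabt_get_rd(vcpu);
		return 0;
	}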
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d1736a53b12d..57cb786a6203 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
+struct kvm_vcpu_fault_info {
+	u32 hsr;		/* Hyp Syndrome Register */
+	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
+	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_cpu_context_t;
+
 struct kvm_vcpu_arch {
 	struct kvm_regs regs;
 
@@ -93,13 +102,13 @@ struct kvm_vcpu_arch {
 	u32 midr;
 
 	/* Exception Information */
-	u32 hsr;		/* Hyp Syndrome Register */
-	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */
-	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	struct kvm_vcpu_fault_info fault;
 
 	/* Floating point registers (VFP and Advanced SIMD/NEON) */
 	struct vfp_hard_struct vfp_guest;
-	struct vfp_hard_struct *vfp_host;
+
+	/* Host FP context */
+	kvm_cpu_context_t *host_cpu_context;
 
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
@@ -122,9 +131,6 @@ struct kvm_vcpu_arch {
 	/* Interrupt related fields */
 	u32 irq_lines;		/* IRQ and FIQ levels */
 
-	/* Hyp exception information */
-	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -181,4 +187,41 @@ struct kvm_one_reg;
 int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		int exception_index);
+
+static inline void __cpu_init_hyp_mode(unsigned long long boot_pgd_ptr,
+				       unsigned long long pgd_ptr,
+				       unsigned long hyp_stack_ptr,
+				       unsigned long vector_ptr)
+{
+	/*
+	 * Call initialization code, and switch to the full blown HYP
+	 * code. The init code doesn't need to preserve these
+	 * registers as r0-r3 are already callee saved according to
+	 * the AAPCS.
+	 * Note that we slightly misuse the prototype by casting the
+	 * stack pointer to a void *.
+	 *
+	 * We don't have enough registers to perform the full init in
+	 * one go. Install the boot PGD first, and then install the
+	 * runtime PGD, stack pointer and vectors. The PGDs are always
+	 * passed as the third argument, in order to be passed into
+	 * r2-r3 to the init code (yes, this is compliant with the
+	 * PCS!).
+	 */
+
+	kvm_call_hyp(NULL, 0, boot_pgd_ptr);
+
+	kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
+}
+
+static inline int kvm_arch_dev_ioctl_check_extension(long ext)
+{
+	return 0;
+}
+
+int kvm_perf_init(void);
+int kvm_perf_teardown(void);
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b34874..472ac7091003 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,9 +19,33 @@
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__
 
+#include <asm/memory.h>
+#include <asm/page.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK	UL(~0)
+#define HYP_PAGE_OFFSET		PAGE_OFFSET
+#define KERN_TO_HYP(kva)	(kva)
+
+/*
+ * Our virtual mapping for the boot-time MMU-enable code. Must be
+ * shared across all the page-tables. Conveniently, we use the vectors
+ * page, where no kernel data will ever be shared with HYP.
+ */
+#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
-void free_hyp_pmds(void);
+void free_boot_hyp_pgd(void);
+void free_hyp_pgds(void);
 
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
@@ -33,9 +57,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 
 phys_addr_t kvm_mmu_get_httbr(void);
+phys_addr_t kvm_mmu_get_boot_httbr(void);
+phys_addr_t kvm_get_idmap_vector(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+	pte_val(*pte) = new_pte;
+	/*
+	 * flush_pmd_entry just takes a void pointer and cleans the necessary
+	 * cache entries, so we can reuse the function for ptes.
+	 */
+	flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
 	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +83,53 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
 	return true;
 }
 
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+	clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+	clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+	pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache. If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and don't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		unsigned long hva = gfn_to_hva(kvm, gfn);
+		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
+#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
+
+#endif	/* !__ASSEMBLY__ */
+
 #endif /* __ARM_KVM_MMU_H__ */
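
An illustrative caller, showing how kvm_set_pte() is meant to pair with the icache maintenance above (the in-tree user is the stage-2 fault handler; this is only a sketch):

	static void map_guest_page(struct kvm *kvm, gfn_t gfn,
				   pte_t *pte, pte_t new_pte)
	{
		kvm_set_pte(pte, new_pte);		/* write + clean the PTE */
		coherent_icache_guest_page(kvm, gfn);	/* no stale icache lines */
	}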
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index ab97207d9cd3..343744e4809c 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@
 
 #include <linux/kernel.h>
 #include <linux/kvm.h>
-#include <linux/kvm_host.h>
 #include <linux/irqreturn.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 18c883023339..2092ee1e1300 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -20,11 +20,6 @@ struct seq_file;
 extern void init_FIQ(int);
 extern int show_fiq_list(struct seq_file *, int);
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
-
 /*
  * This is for easy migration, but should be changed in the source
  */
@@ -35,35 +30,4 @@ do { \
 	raw_spin_unlock(&desc->lock);	\
 } while(0)
 
-#ifndef __ASSEMBLY__
-/*
- * Entry/exit functions for chained handlers where the primary IRQ chip
- * may implement either fasteoi or level-trigger flow control.
- */
-static inline void chained_irq_enter(struct irq_chip *chip,
-				     struct irq_desc *desc)
-{
-	/* FastEOI controllers require no action on entry. */
-	if (chip->irq_eoi)
-		return;
-
-	if (chip->irq_mask_ack) {
-		chip->irq_mask_ack(&desc->irq_data);
-	} else {
-		chip->irq_mask(&desc->irq_data);
-		if (chip->irq_ack)
-			chip->irq_ack(&desc->irq_data);
-	}
-}
-
-static inline void chained_irq_exit(struct irq_chip *chip,
-				    struct irq_desc *desc)
-{
-	if (chip->irq_eoi)
-		chip->irq_eoi(&desc->irq_data);
-	else
-		chip->irq_unmask(&desc->irq_data);
-}
-#endif
-
 #endif
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 5cf2e979b4be..7d2c3c843801 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -30,6 +30,11 @@ struct hw_pci {
 	void		(*postinit)(void);
 	u8		(*swizzle)(struct pci_dev *dev, u8 *pin);
 	int		(*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align);
 };
 
 /*
@@ -51,6 +56,12 @@ struct pci_sys_data {
 	u8		(*swizzle)(struct pci_dev *, u8 *);
 					/* IRQ mapping */
 	int		(*map_irq)(const struct pci_dev *, u8, u8);
+					/* Resource alignment requirements */
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align);
 	void		*private_data;	/* platform controller private data */
 };
 
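
A sketch of a host bridge wiring up the new callback, e.g. to force 1KiB alignment on I/O BARs (my_map_irq is hypothetical):

	static resource_size_t my_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
	{
		if (res->flags & IORESOURCE_IO)
			return ALIGN(start, SZ_1K);
		return start;
	}

	static struct hw_pci my_pci __initdata = {
		.nr_controllers	= 1,
		.map_irq	= my_map_irq,
		.align_resource	= my_align_resource,
	};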
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 000000000000..0f7b7620e9a5
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,209 @@
+/*
+ * arch/arm/include/asm/mcpm.h
+ *
+ * Created by:  Nicolas Pitre, April 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MCPM_H
+#define MCPM_H
+
+/*
+ * Maximum number of possible clusters / CPUs per cluster.
+ *
+ * This should be sufficient for quite a while, while keeping the
+ * (assembly) code simpler. When this starts to grow then we'll have
+ * to consider dynamic allocation.
+ */
+#define MAX_CPUS_PER_CLUSTER	4
+#define MAX_NR_CLUSTERS		2
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Platform specific code should use this symbol to set up secondary
+ * entry location for processors to use when released from reset.
+ */
+extern void mcpm_entry_point(void);
+
+/*
+ * This is used to indicate where the given CPU from given cluster should
+ * branch once it is ready to re-enter the kernel using ptr, or NULL if it
+ * should be gated. A gated CPU is held in a WFE loop until its vector
+ * becomes non NULL.
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_cpu_power_up - make given CPU in given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset. If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context. However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return. Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU in a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ *			to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended. The expected residency argument is used
+ * as a hint by the platform specific backend to implement the appropriate
+ * sleep state level according to the knowledge it has on wake-up latency
+ * for the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return. Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work. This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+	int (*power_up)(unsigned int cpu, unsigned int cluster);
+	void (*power_down)(void);
+	void (*suspend)(u64);
+	void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+	/* individual CPU states */
+	struct {
+		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+	} cpus[MAX_CPUS_PER_CLUSTER];
+
+	/* cluster state */
+	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+	/* inbound-side state */
+	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+extern unsigned long sync_phys;	/* physical address of *mcpm_sync */
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+	void (*power_up_setup)(unsigned int affinity_level));
+
+void __init mcpm_smp_set_ops(void);
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files. Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN		0x11
+#define CPU_COMING_UP		0x12
+#define CPU_UP			0x13
+#define CPU_GOING_DOWN		0x14
+
+#define CLUSTER_DOWN		0x21
+#define CLUSTER_UP		0x22
+#define CLUSTER_GOING_DOWN	0x23
+
+#define INBOUND_NOT_COMING_UP	0x31
+#define INBOUND_COMING_UP	0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS	0
+#define MCPM_SYNC_CPU_SIZE	__CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
+
+#endif
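
A minimal backend sketch against the ops structure above, with hypothetical my_pmu_* helpers standing in for real power-controller accesses:

	static int my_mcpm_power_up(unsigned int cpu, unsigned int cluster)
	{
		if (cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER)
			return -EINVAL;
		my_pmu_release_reset(cpu, cluster);
		return 0;
	}

	static void my_mcpm_power_down(void)
	{
		flush_cache_all();	/* clean our dirty lines first */
		my_pmu_cpu_off();	/* does not return */
	}

	static const struct mcpm_platform_ops my_mcpm_ops = {
		.power_up	= my_mcpm_power_up,
		.power_down	= my_mcpm_power_down,
	};

	static int __init my_mcpm_init(void)
	{
		int ret = mcpm_platform_register(&my_mcpm_ops);

		if (!ret)
			mcpm_smp_set_ops();	/* route SMP boot through MCPM */
		return ret;
	}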
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 6ef8afd1b64c..86b8fe398b95 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
 #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_MT_WRITEBACK	 (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_RDONLY		 (_AT(pteval_t, 1) << 6)   /* HAP[1] */
-#define L_PTE_S2_RDWR		 (_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
+#define L_PTE_S2_RDWR		 (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
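
(For reference: in LPAE stage-2 descriptors, HAP[1] grants read and HAP[2] grants write, so the old value 2 encoded a write-only page and reads through mappings intended to be read/write would fault; 3 sets both bits.)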
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 80d6fc4dbe4a..9bcd262a9008 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t);
 #define FIRST_USER_ADDRESS	PAGE_SIZE
 
 /*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+ * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
+ * page shared between user and kernel).
+ */
+#ifdef CONFIG_ARM_LPAE
+#define USER_PGTABLES_CEILING	TASK_SIZE
+#endif
+
+/*
  * The pgprot_* and protection_map entries will be fixed up in runtime
  * to include the cachable and bufferable bits based on memory policy,
  * as well as any architecture dependent bits like global/ASID and SMP
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 5a85f148b607..21a23e378bbe 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -21,9 +21,6 @@ extern void (*arm_pm_idle)(void);
 
 extern unsigned int user_debug;
 
-extern void disable_hlt(void);
-extern void enable_hlt(void);
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_ARM_SYSTEM_MISC_H */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index cddda1f41f0f..1995d1a84060 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
 #define TIF_SECCOMP		11	/* seccomp syscall filtering active */
+#define TIF_NOHZ		12	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 9e9c041358ca..a3625d141c1d 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
 
 #include <asm/glue.h>
 
-#define TLB_V3_PAGE	(1 << 0)
 #define TLB_V4_U_PAGE	(1 << 1)
 #define TLB_V4_D_PAGE	(1 << 2)
 #define TLB_V4_I_PAGE	(1 << 3)
@@ -22,7 +21,6 @@
 #define TLB_V6_D_PAGE	(1 << 5)
 #define TLB_V6_I_PAGE	(1 << 6)
 
-#define TLB_V3_FULL	(1 << 8)
 #define TLB_V4_U_FULL	(1 << 9)
 #define TLB_V4_D_FULL	(1 << 10)
 #define TLB_V4_I_FULL	(1 << 11)
@@ -52,7 +50,6 @@
  *	=============
  *
  *	We have the following to choose from:
- *	  v3    - ARMv3
  *	  v4    - ARMv4 without write buffer
  *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
  *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -169,7 +166,7 @@
 # define v6wbi_always_flags	(-1UL)
 #endif
 
-#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_BARRIER | \
 				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
 				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 #define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
@@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
 	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
 	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
 	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
 		if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
-			tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
 			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
 			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
 			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
 	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
 		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
 		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
 		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
 	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
 	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
 	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index e4ddfb39ca34..141baa3f9a72 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -44,14 +44,6 @@
 #define __ARCH_WANT_SYS_CLONE
 
 /*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-
-/*
  * Unimplemented (or alternatively implemented) syscalls
  */
 #define __IGNORE_fadvise64_64
diff --git a/arch/arm/include/debug/bcm2835.S b/arch/arm/include/debug/bcm2835.S
new file mode 100644
index 000000000000..aed9199bd847
--- /dev/null
+++ b/arch/arm/include/debug/bcm2835.S
@@ -0,0 +1,22 @@
+/*
+ * Debugging macro include header
+ *
+ * Copyright (C) 2010 Broadcom
+ * Copyright (C) 1994-1999 Russell King
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define BCM2835_DEBUG_PHYS 0x20201000
+#define BCM2835_DEBUG_VIRT 0xf0201000
+
+	.macro	addruart, rp, rv, tmp
+	ldr	\rp, =BCM2835_DEBUG_PHYS
+	ldr	\rv, =BCM2835_DEBUG_VIRT
+	.endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/cns3xxx.S b/arch/arm/include/debug/cns3xxx.S
new file mode 100644
index 000000000000..d04c150baa1c
--- /dev/null
+++ b/arch/arm/include/debug/cns3xxx.S
@@ -0,0 +1,19 @@
+/*
+ * Debugging macro include header
+ *
+ * Copyright 1994-1999 Russell King
+ * Copyright 2008 Cavium Networks
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+	.macro	addruart,rp,rv,tmp
+	mov	\rp, #0x00009000
+	orr	\rv, \rp, #0xf0000000	@ virtual base
+	orr	\rp, \rp, #0x10000000
+	.endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/exynos.S b/arch/arm/include/debug/exynos.S
new file mode 100644
index 000000000000..b17fdb7fbd34
--- /dev/null
+++ b/arch/arm/include/debug/exynos.S
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* pull in the relevant register and map files. */
+
+#define S3C_ADDR_BASE	0xF6000000
+#define S3C_VA_UART	S3C_ADDR_BASE + 0x01000000
+#define EXYNOS4_PA_UART	0x13800000
+#define EXYNOS5_PA_UART	0x12C00000
+
+	/* note, for the boot process to work we have to keep the UART
+	 * virtual address aligned to a 1MiB boundary for the L1
+	 * mapping the head code makes. We keep the UART virtual address
+	 * aligned and add in the offset when we load the value here.
+	 */
+
+	.macro addruart, rp, rv, tmp
+		mrc	p15, 0, \tmp, c0, c0, 0
+		and	\tmp, \tmp, #0xf0
+		teq	\tmp, #0xf0		@@ A15
+		ldreq	\rp, =EXYNOS5_PA_UART
+		movne	\rp, #EXYNOS4_PA_UART	@@ EXYNOS4
+		ldr	\rv, =S3C_VA_UART
+#if CONFIG_DEBUG_S3C_UART != 0
+		add	\rp, \rp, #(0x10000 * CONFIG_DEBUG_S3C_UART)
+		add	\rv, \rv, #(0x10000 * CONFIG_DEBUG_S3C_UART)
+#endif
+	.endm
+
+#define fifo_full fifo_full_s5pv210
+#define fifo_level fifo_level_s5pv210
+
+#include <debug/samsung.S>
diff --git a/arch/arm/include/debug/mvebu.S b/arch/arm/include/debug/mvebu.S
index 865c6d02b332..df191afa3be1 100644
--- a/arch/arm/include/debug/mvebu.S
+++ b/arch/arm/include/debug/mvebu.S
@@ -12,7 +12,7 @@
 */
 
 #define ARMADA_370_XP_REGS_PHYS_BASE	0xd0000000
-#define ARMADA_370_XP_REGS_VIRT_BASE	0xfeb00000
+#define ARMADA_370_XP_REGS_VIRT_BASE	0xfec00000
 
 	.macro	addruart, rp, rv, tmp
 	ldr	\rp, =ARMADA_370_XP_REGS_PHYS_BASE
diff --git a/arch/arm/include/debug/mxs.S b/arch/arm/include/debug/mxs.S
new file mode 100644
index 000000000000..d86951551ca1
--- /dev/null
+++ b/arch/arm/include/debug/mxs.S
@@ -0,0 +1,27 @@
+/* arch/arm/mach-mxs/include/mach/debug-macro.S
+ *
+ * Debugging macro include header
+ *
+ * Copyright (C) 1994-1999 Russell King
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifdef CONFIG_DEBUG_IMX23_UART
+#define UART_PADDR	0x80070000
+#elif defined(CONFIG_DEBUG_IMX28_UART)
+#define UART_PADDR	0x80074000
+#endif
+
+#define UART_VADDR	0xfe100000
+
+	.macro	addruart, rp, rv, tmp
+	ldr	\rp, =UART_PADDR	@ physical
+	ldr	\rv, =UART_VADDR	@ virtual
+	.endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/nomadik.S b/arch/arm/include/debug/nomadik.S
new file mode 100644
index 000000000000..735417922ce2
--- /dev/null
+++ b/arch/arm/include/debug/nomadik.S
@@ -0,0 +1,20 @@
+/*
+ * Debugging macro include header
+ *
+ * Copyright (C) 1994-1999 Russell King
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+	.macro	addruart, rp, rv, tmp
+	mov	\rp, #0x00100000
+	add	\rp, \rp, #0x000fb000
+	add	\rv, \rp, #0xf0000000	@ virtual base
+	add	\rp, \rp, #0x10000000	@ physical base address
+	.endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/pxa.S b/arch/arm/include/debug/pxa.S
new file mode 100644
index 000000000000..e1e795aa3d7f
--- /dev/null
+++ b/arch/arm/include/debug/pxa.S
@@ -0,0 +1,33 @@
+/*
+ * Early serial output macro for Marvell PXA/MMP SoC
+ *
+ * Copyright (C) 1994-1999 Russell King
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * Copyright (C) 2013 Haojian Zhuang
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if defined(CONFIG_DEBUG_PXA_UART1)
+#define PXA_UART_REG_PHYS_BASE	0x40100000
+#define PXA_UART_REG_VIRT_BASE	0xf2100000
+#elif defined(CONFIG_DEBUG_MMP_UART2)
+#define PXA_UART_REG_PHYS_BASE	0xd4017000
+#define PXA_UART_REG_VIRT_BASE	0xfe017000
+#elif defined(CONFIG_DEBUG_MMP_UART3)
+#define PXA_UART_REG_PHYS_BASE	0xd4018000
+#define PXA_UART_REG_VIRT_BASE	0xfe018000
+#else
+#error "Select uart for DEBUG_LL"
+#endif
+
+	.macro	addruart, rp, rv, tmp
+	ldr	\rp, =PXA_UART_REG_PHYS_BASE
+	ldr	\rv, =PXA_UART_REG_VIRT_BASE
+	.endm
+
+#define UART_SHIFT	2
+#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/samsung.S b/arch/arm/include/debug/samsung.S
new file mode 100644
index 000000000000..f3a9cff6d5d4
--- /dev/null
+++ b/arch/arm/include/debug/samsung.S
@@ -0,0 +1,87 @@
+/* arch/arm/plat-samsung/include/plat/debug-macro.S
+ *
+ * Copyright 2005, 2007 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <plat/regs-serial.h>
+
+/* The S5PV210/S5PC110 implementations are as below. */
+
+	.macro fifo_level_s5pv210 rd, rx
+	ldr	\rd, [\rx, # S3C2410_UFSTAT]
+	and	\rd, \rd, #S5PV210_UFSTAT_TXMASK
+	.endm
+
+	.macro fifo_full_s5pv210 rd, rx
+	ldr	\rd, [\rx, # S3C2410_UFSTAT]
+	tst	\rd, #S5PV210_UFSTAT_TXFULL
+	.endm
+
+/* The S3C2440 implementations are used by default as they are the
+ * most widely re-used */
+
+	.macro fifo_level_s3c2440 rd, rx
+	ldr	\rd, [\rx, # S3C2410_UFSTAT]
+	and	\rd, \rd, #S3C2440_UFSTAT_TXMASK
+	.endm
+
+#ifndef fifo_level
+#define fifo_level fifo_level_s3c2440
+#endif
+
+	.macro fifo_full_s3c2440 rd, rx
+	ldr	\rd, [\rx, # S3C2410_UFSTAT]
+	tst	\rd, #S3C2440_UFSTAT_TXFULL
+	.endm
+
+#ifndef fifo_full
+#define fifo_full fifo_full_s3c2440
+#endif
+
+	.macro	senduart,rd,rx
+	strb	\rd, [\rx, # S3C2410_UTXH]
+	.endm
+
+	.macro	busyuart, rd, rx
+	ldr	\rd, [\rx, # S3C2410_UFCON]
+	tst	\rd, #S3C2410_UFCON_FIFOMODE	@ fifo enabled?
+	beq	1001f				@
+	@ FIFO enabled...
+1003:
+	fifo_full \rd, \rx
+	bne	1003b
+	b	1002f
+
+1001:
+	@ busy waiting for non fifo
+	ldr	\rd, [\rx, # S3C2410_UTRSTAT]
+	tst	\rd, #S3C2410_UTRSTAT_TXFE
+	beq	1001b
+
+1002:		@ exit busyuart
+	.endm
+
+	.macro	waituart,rd,rx
+	ldr	\rd, [\rx, # S3C2410_UFCON]
+	tst	\rd, #S3C2410_UFCON_FIFOMODE	@ fifo enabled?
+	beq	1001f				@
+	@ FIFO enabled...
+1003:
+	fifo_level \rd, \rx
+	teq	\rd, #0
+	bne	1003b
+	b	1002f
+1001:
+	@ idle waiting for non fifo
+	ldr	\rd, [\rx, # S3C2410_UTRSTAT]
+	tst	\rd, #S3C2410_UTRSTAT_TXFE
+	beq	1001b
+
+1002:		@ exit busyuart
+	.endm
diff --git a/arch/arm/include/debug/sirf.S b/arch/arm/include/debug/sirf.S
new file mode 100644
index 000000000000..dbf250cf18e6
--- /dev/null
+++ b/arch/arm/include/debug/sirf.S
@@ -0,0 +1,42 @@
+/*
+ * arch/arm/mach-prima2/include/mach/debug-macro.S
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#if defined(CONFIG_DEBUG_SIRFPRIMA2_UART1)
+#define SIRFSOC_UART1_PA_BASE		0xb0060000
+#elif defined(CONFIG_DEBUG_SIRFMARCO_UART1)
+#define SIRFSOC_UART1_PA_BASE		0xcc060000
+#else
+#define SIRFSOC_UART1_PA_BASE		0
+#endif
+
+#define SIRFSOC_UART1_VA_BASE		0xFEC60000
+
+#define SIRFSOC_UART_TXFIFO_STATUS	0x0114
+#define SIRFSOC_UART_TXFIFO_DATA	0x0118
+
+#define SIRFSOC_UART1_TXFIFO_FULL	(1 << 5)
+#define SIRFSOC_UART1_TXFIFO_EMPTY	(1 << 6)
+
+	.macro	addruart, rp, rv, tmp
+	ldr	\rp, =SIRFSOC_UART1_PA_BASE	@ physical
+	ldr	\rv, =SIRFSOC_UART1_VA_BASE	@ virtual
+	.endm
+
+	.macro	senduart,rd,rx
+	str	\rd, [\rx, #SIRFSOC_UART_TXFIFO_DATA]
+	.endm
+
+	.macro	busyuart,rd,rx
+	.endm
+
+	.macro	waituart,rd,rx
+1001:	ldr	\rd, [\rx, #SIRFSOC_UART_TXFIFO_STATUS]
+	tst	\rd, #SIRFSOC_UART1_TXFIFO_EMPTY
+	beq	1001b
+	.endm
+
diff --git a/arch/arm/include/debug/uncompress.h b/arch/arm/include/debug/uncompress.h
new file mode 100644
index 000000000000..0e2949b0fae9
--- /dev/null
+++ b/arch/arm/include/debug/uncompress.h
@@ -0,0 +1,7 @@
+#ifdef CONFIG_DEBUG_UNCOMPRESS
+extern void putc(int c);
+#else
+static inline void putc(int c) {}
+#endif
+static inline void flush(void) {}
+static inline void arch_decomp_setup(void) {}
diff --git a/arch/arm/include/debug/ux500.S b/arch/arm/include/debug/ux500.S
new file mode 100644
index 000000000000..2848857f5b62
--- /dev/null
+++ b/arch/arm/include/debug/ux500.S
@@ -0,0 +1,48 @@
+/*
+ * Debugging macro include header
+ *
+ * Copyright (C) 2009 ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+
+#if CONFIG_UX500_DEBUG_UART > 2
+#error Invalid Ux500 debug UART
+#endif
+
+/*
+ * DEBUG_LL only works if only one SOC is built in. We don't use #else below
+ * in order to get "__UX500_UART redefined" warnings if more than one SOC is
+ * built, so that there's some hint during the build that something is wrong.
+ */
+
+#ifdef CONFIG_UX500_SOC_DB8500
+#define U8500_UART0_PHYS_BASE	(0x80120000)
+#define U8500_UART1_PHYS_BASE	(0x80121000)
+#define U8500_UART2_PHYS_BASE	(0x80007000)
+#define U8500_UART0_VIRT_BASE	(0xa8120000)
+#define U8500_UART1_VIRT_BASE	(0xa8121000)
+#define U8500_UART2_VIRT_BASE	(0xa8007000)
+#define __UX500_PHYS_UART(n)	U8500_UART##n##_PHYS_BASE
+#define __UX500_VIRT_UART(n)	U8500_UART##n##_VIRT_BASE
+#endif
+
+#if !defined(__UX500_PHYS_UART) || !defined(__UX500_VIRT_UART)
+#error Unknown SOC
+#endif
+
+#define UX500_PHYS_UART(n)	__UX500_PHYS_UART(n)
+#define UX500_VIRT_UART(n)	__UX500_VIRT_UART(n)
+#define UART_PHYS_BASE		UX500_PHYS_UART(CONFIG_UX500_DEBUG_UART)
+#define UART_VIRT_BASE		UX500_VIRT_UART(CONFIG_UX500_DEBUG_UART)
+
+	.macro	addruart, rp, rv, tmp
+	ldr	\rp, =UART_PHYS_BASE	@ no, physical address
+	ldr	\rv, =UART_VIRT_BASE	@ yes, virtual address
+	.endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 023bfeb367bf..c1ee007523d7 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@
 #define KVM_ARM_FIQ_spsr	fiq_regs[7]
 
 struct kvm_regs {
-	struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
-	__u32 svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
-	__u32 abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
-	__u32 und_regs[3];	/* SP_und, LR_und, SPSR_und */
-	__u32 irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
-	__u32 fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
+	struct pt_regs usr_regs;	/* R0_usr - R14_usr, PC, CPSR */
+	unsigned long svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
+	unsigned long abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
+	unsigned long und_regs[3];	/* SP_und, LR_und, SPSR_und */
+	unsigned long irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
+	unsigned long fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
 };
 
 /* Supported Processor Types */