author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 12:13:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 12:13:19 -0400
commit		8546dc1d4b671480961c3eaf4c0c102ae6848340 (patch)
tree		c646079fb48811b22b742deb6bd2e907f9e6c3d4 /arch/arm/include
parent		9992ba72327fa0d8bdc9fb624e80f5cce338a711 (diff)
parent		33b9f582c5c1db515412cc7efff28f7d1779321f (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
 "The major items included in here are:

   - MCPM, multi-cluster power management, part of the infrastructure
     required for ARM's big.LITTLE support.

   - A rework of the ARM KVM code to allow re-use by ARM64.

   - Error handling cleanups of the IS_ERR_OR_NULL() madness and fixes
     of that stuff for arch/arm.

   - Preparatory patches for Cortex-M3 support from Uwe Kleine-König.

  There is also a set of three patches in here from Hugh/Catalin to
  address freeing of inappropriate page tables on LPAE.  You already
  have these from akpm, but they were already part of my tree at the
  time he sent them, so unfortunately they'll end up with duplicate
  commits"

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (77 commits)
  ARM: EXYNOS: remove unnecessary use of IS_ERR_VALUE()
  ARM: IMX: remove unnecessary use of IS_ERR_VALUE()
  ARM: OMAP: use consistent error checking
  ARM: cleanup: OMAP hwmod error checking
  ARM: 7709/1: mcpm: Add explicit AFLAGS to support v6/v7 multiplatform kernels
  ARM: 7700/2: Make cpu_init() notrace
  ARM: 7702/1: Set the page table freeing ceiling to TASK_SIZE
  ARM: 7701/1: mm: Allow arch code to control the user page table ceiling
  ARM: 7703/1: Disable preemption in broadcast_tlb*_a15_erratum()
  ARM: mcpm: provide an interface to set the SMP ops at run time
  ARM: mcpm: generic SMP secondary bringup and hotplug support
  ARM: mcpm_head.S: vlock-based first man election
  ARM: mcpm: Add baremetal voting mutexes
  ARM: mcpm: introduce helpers for platform coherency exit/setup
  ARM: mcpm: introduce the CPU/cluster power API
  ARM: multi-cluster PM: secondary kernel entry code
  ARM: cacheflush: add synchronization helpers for mixed cache state accesses
  ARM: cpu hotplug: remove majority of cache flushing from platforms
  ARM: smp: flush L1 cache in cpu_die()
  ARM: tegra: remove tegra specific cpu_disable()
  ...
Diffstat (limited to 'arch/arm/include')
-rw-r--r--	arch/arm/include/asm/atomic.h		 24
-rw-r--r--	arch/arm/include/asm/cacheflush.h	 75
-rw-r--r--	arch/arm/include/asm/cp15.h		 16
-rw-r--r--	arch/arm/include/asm/cputype.h		 61
-rw-r--r--	arch/arm/include/asm/glue-df.h		 20
-rw-r--r--	arch/arm/include/asm/kvm_arm.h		  4
-rw-r--r--	arch/arm/include/asm/kvm_asm.h		  2
-rw-r--r--	arch/arm/include/asm/kvm_emulate.h	107
-rw-r--r--	arch/arm/include/asm/kvm_host.h		 42
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h		 67
-rw-r--r--	arch/arm/include/asm/kvm_vgic.h		  1
-rw-r--r--	arch/arm/include/asm/mach/pci.h		 11
-rw-r--r--	arch/arm/include/asm/mcpm.h		209
-rw-r--r--	arch/arm/include/asm/thread_info.h	  1
-rw-r--r--	arch/arm/include/asm/tlbflush.h		  2
-rw-r--r--	arch/arm/include/debug/uncompress.h	  7
-rw-r--r--	arch/arm/include/uapi/asm/kvm.h		 12
17 files changed, 607 insertions, 54 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index c79f61faa3a5..da1c77d39327 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,6 +243,29 @@ typedef struct {
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
+#ifdef CONFIG_ARM_LPAE
+static inline u64 atomic64_read(const atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter), "Qo" (v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	__asm__ __volatile__("@ atomic64_set\n"
+"	strd	%2, %H2, [%1]"
+	: "=Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	);
+}
+#else
 static inline u64 atomic64_read(const atomic64_t *v)
 {
 	u64 result;
@@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
+#endif
 
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
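
[Editor's note] On LPAE-capable CPUs a 64-bit LDRD/STRD is single-copy atomic, which is why the new paths above can drop the exclusive-monitor sequence for plain reads and writes. A minimal usage sketch (not part of the commit; the counter and functions are invented for illustration):

	#include <linux/atomic.h>

	static atomic64_t rx_bytes = ATOMIC64_INIT(0);

	static void account_rx(u64 len)
	{
		/* read-modify-write: still an LDREXD/STREXD retry loop */
		atomic64_add(len, &rx_bytes);
	}

	static u64 rx_snapshot(void)
	{
		/* one plain LDRD on LPAE kernels, a single LDREXD otherwise */
		return atomic64_read(&rx_bytes);
	}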
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd12..bff71388e72a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 		flush_cache_all();
 }
 
+/*
+ * Memory synchronization helpers for mixed cached vs non cached accesses.
+ *
+ * Some synchronization algorithms have to set states in memory with the
+ * cache enabled or disabled depending on the code path.  It is crucial
+ * to always ensure proper cache maintenance to update main memory right
+ * away in that case.
+ *
+ * Any cached write must be followed by a cache clean operation.
+ * Any cached read must be preceded by a cache invalidate operation.
+ * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
+ * operation is needed to avoid discarding possible concurrent writes to the
+ * accessed memory.
+ *
+ * Also, in order to prevent a cached writer from interfering with an
+ * adjacent non-cached writer, each state variable must be located to
+ * a separate cache line.
+ */
+
+/*
+ * This needs to be >= the max cache writeback size of all
+ * supported platforms included in the current kernel configuration.
+ * This is used to align state variables to their own cache lines.
+ */
+#define __CACHE_WRITEBACK_ORDER		6  /* guessed from existing platforms */
+#define __CACHE_WRITEBACK_GRANULE	(1 << __CACHE_WRITEBACK_ORDER)
+
+/*
+ * There is no __cpuc_clean_dcache_area but we use it anyway for
+ * code intent clarity, and alias it to __cpuc_flush_dcache_area.
+ */
+#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
+
+/*
+ * Ensure preceding writes to *p by this CPU are visible to
+ * subsequent reads by other CPUs:
+ */
+static inline void __sync_cache_range_w(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+	__cpuc_clean_dcache_area(_p, size);
+	outer_clean_range(__pa(_p), __pa(_p + size));
+}
+
+/*
+ * Ensure preceding writes to *p by other CPUs are visible to
+ * subsequent reads by this CPU.  We must be careful not to
+ * discard data simultaneously written by another CPU, hence the
+ * usage of flush rather than invalidate operations.
+ */
+static inline void __sync_cache_range_r(volatile void *p, size_t size)
+{
+	char *_p = (char *)p;
+
+#ifdef CONFIG_OUTER_CACHE
+	if (outer_cache.flush_range) {
+		/*
+		 * Ensure dirty data migrated from other CPUs into our cache
+		 * are cleaned out safely before the outer cache is cleaned:
+		 */
+		__cpuc_clean_dcache_area(_p, size);
+
+		/* Clean and invalidate stale data for *p from outer ... */
+		outer_flush_range(__pa(_p), __pa(_p + size));
+	}
+#endif
+
+	/* ... and inner cache: */
+	__cpuc_flush_dcache_area(_p, size);
+}
+
+#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
+#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
+
 #endif
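
[Editor's note] A hedged sketch of the intended usage pattern (names invented; this mirrors the MCPM use case the comment block describes): one side writes a flag with caches on, the other side may read it with caches off, so the flag gets its own writeback granule and both sides use the helpers:

	struct wake_flag {
		int go __aligned(__CACHE_WRITEBACK_GRANULE);
	};

	static struct wake_flag wf;

	static void signal_cpu(void)		/* cached writer */
	{
		wf.go = 1;
		sync_cache_w(&wf.go);	/* clean inner + outer: RAM now holds 1 */
	}

	static int check_signal(void)		/* possibly uncached reader */
	{
		sync_cache_r(&wf.go);	/* flush, not invalidate: preserve others' writes */
		return wf.go;
	}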
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a60..1f3262e99d81 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -42,6 +42,8 @@
 #define vectors_high()	(0)
 #endif
 
+#ifdef CONFIG_CPU_CP15
+
 extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
@@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val)
 	isb();
 }
 
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
+ * minds of the developers). Yielding 0 for machines without a cp15 (and making
+ * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_no_alignment	UL(0)
+#define cr_alignment	UL(0)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
 
 #endif
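
[Editor's note] The point of the read-only fallbacks is that common code can test control-register state without its own #ifdef. A small sketch, assuming the CR_A bit this header already defines:

	static inline int alignment_faults_enabled(void)
	{
		/* constant-folds to 0 on CONFIG_CPU_CP15=n (e.g. Cortex-M3) builds */
		return !!(cr_alignment & CR_A);
	}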
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ad41ec2471e8..7652712d1d14 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -38,6 +38,24 @@
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
 	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
 
+#define ARM_CPU_IMP_ARM			0x41
+#define ARM_CPU_IMP_INTEL		0x69
+
+#define ARM_CPU_PART_ARM1136		0xB360
+#define ARM_CPU_PART_ARM1156		0xB560
+#define ARM_CPU_PART_ARM1176		0xB760
+#define ARM_CPU_PART_ARM11MPCORE	0xB020
+#define ARM_CPU_PART_CORTEX_A8		0xC080
+#define ARM_CPU_PART_CORTEX_A9		0xC090
+#define ARM_CPU_PART_CORTEX_A5		0xC050
+#define ARM_CPU_PART_CORTEX_A15		0xC0F0
+#define ARM_CPU_PART_CORTEX_A7		0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
+#define ARM_CPU_XSCALE_ARCH_V1		0x2000
+#define ARM_CPU_XSCALE_ARCH_V2		0x4000
+#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
@@ -50,6 +68,7 @@ extern unsigned int processor_id;
 		: "cc");					\
 		__val;						\
 	})
+
 #define read_cpuid_ext(ext_reg)					\
 	({							\
 		unsigned int __val;				\
@@ -59,29 +78,24 @@ extern unsigned int processor_id;
 		: "cc");					\
 		__val;						\
 	})
-#else
-#define read_cpuid(reg) (processor_id)
-#define read_cpuid_ext(reg) 0
-#endif
 
-#define ARM_CPU_IMP_ARM			0x41
-#define ARM_CPU_IMP_INTEL		0x69
+#else /* ifdef CONFIG_CPU_CP15 */
 
-#define ARM_CPU_PART_ARM1136		0xB360
-#define ARM_CPU_PART_ARM1156		0xB560
-#define ARM_CPU_PART_ARM1176		0xB760
-#define ARM_CPU_PART_ARM11MPCORE	0xB020
-#define ARM_CPU_PART_CORTEX_A8		0xC080
-#define ARM_CPU_PART_CORTEX_A9		0xC090
-#define ARM_CPU_PART_CORTEX_A5		0xC050
-#define ARM_CPU_PART_CORTEX_A15		0xC0F0
-#define ARM_CPU_PART_CORTEX_A7		0xC070
+/*
+ * read_cpuid and read_cpuid_ext should only ever be called on machines that
+ * have cp15 so warn on other usages.
+ */
+#define read_cpuid(reg)						\
+	({							\
+		WARN_ON_ONCE(1);				\
+		0;						\
+	})
 
-#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
-#define ARM_CPU_XSCALE_ARCH_V1		0x2000
-#define ARM_CPU_XSCALE_ARCH_V2		0x4000
-#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+#define read_cpuid_ext(reg) read_cpuid(reg)
 
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#ifdef CONFIG_CPU_CP15
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant.  Use this function to read the CPU ID
@@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
 	return read_cpuid(CPUID_ID);
 }
 
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 {
 	return (read_cpuid_id() & 0xFF000000) >> 24;
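
[Editor's note] With the ARM_CPU_* defines hoisted out of the #ifdef, identification code now builds on any configuration. A sketch of the usual pattern (assuming read_cpuid_part_number(), which this header provides alongside read_cpuid_implementor()):

	static bool cpu_is_cortex_a15(void)
	{
		return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
		       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
	}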
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 8cacbcda76da..b6e9f2c108b5 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -18,12 +18,12 @@
  *	  ================
  *
  *	We have the following to choose from:
- *	  arm6		- ARM6 style
  *	  arm7		- ARM7 style
  *	  v4_early	- ARMv4 without Thumb early abort handler
  *	  v4t_late	- ARMv4 with Thumb late abort handler
  *	  v4t_early	- ARMv4 with Thumb early abort handler
- *	  v5tej_early	- ARMv5 with Thumb and Java early abort handler
+ *	  v5t_early	- ARMv5 with Thumb early abort handler
+ *	  v5tj_early	- ARMv5 with Thumb and Java early abort handler
  *	  xscale	- ARMv5 with Thumb with Xscale extensions
  *	  v6_early	- ARMv6 generic early abort handler
  *	  v7_early	- ARMv7 generic early abort handler
@@ -39,19 +39,19 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_LV4T
+#ifdef CONFIG_CPU_ABRT_EV4
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4t_late_abort
+#  define CPU_DABORT_HANDLER v4_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV4
+#ifdef CONFIG_CPU_ABRT_LV4T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v4_early_abort
+#  define CPU_DABORT_HANDLER v4t_late_abort
 # endif
 #endif
 
@@ -63,19 +63,19 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5TJ
+#ifdef CONFIG_CPU_ABRT_EV5T
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5tj_early_abort
+#  define CPU_DABORT_HANDLER v5t_early_abort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ABRT_EV5T
+#ifdef CONFIG_CPU_ABRT_EV5TJ
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
 # else
-#  define CPU_DABORT_HANDLER v5t_early_abort
+#  define CPU_DABORT_HANDLER v5tj_early_abort
 # endif
 #endif
 
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 7c3d813e15df..124623e5ef14 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@
 
 #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
 
+#define HSR_DABT_S1PTW		(1U << 7)
+#define HSR_DABT_CM		(1U << 8)
+#define HSR_DABT_EA		(1U << 9)
+
 #endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e1..18d50322a9e2 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 #endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb5..82b4babead2c 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
 
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+	return &vcpu->arch.regs.usr_regs.ARM_pc;
 }
 
-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
 	return reg == 15;
 }
 
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+	case 0:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	default:
+		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+		return -EFAULT;
+	}
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
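
[Editor's note] Taken together, these accessors let exit handlers decode a data abort without poking at vcpu->arch.fault directly. A rough sketch of an MMIO decode step (approximating the logic in arch/arm/kvm/mmio.c; the function name and error handling are illustrative):

	static int decode_mmio_abort(struct kvm_vcpu *vcpu,
				     bool *is_write, int *len, int *rd)
	{
		if (!kvm_vcpu_dabt_isvalid(vcpu))
			return -EFAULT;		/* no valid ISS: cannot emulate */

		*len = kvm_vcpu_dabt_get_as(vcpu);	/* 1, 2 or 4 bytes */
		if (*len < 0)
			return -EFAULT;

		*is_write = kvm_vcpu_dabt_iswrite(vcpu);
		*rd = kvm_vcpu_dabt_get_rd(vcpu);	/* register to load/store */
		return 0;
	}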
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d1736a53b12d..0c4e643d939e 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
+struct kvm_vcpu_fault_info {
+	u32 hsr;		/* Hyp Syndrome Register */
+	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
+	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_kernel_vfp_t;
+
 struct kvm_vcpu_arch {
 	struct kvm_regs regs;
 
@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
 	u32 midr;
 
 	/* Exception Information */
-	u32 hsr;		/* Hyp Syndrome Register */
-	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */
-	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	struct kvm_vcpu_fault_info fault;
 
 	/* Floating point registers (VFP and Advanced SIMD/NEON) */
-	struct vfp_hard_struct vfp_guest;
-	struct vfp_hard_struct *vfp_host;
+	kvm_kernel_vfp_t vfp_guest;
+	kvm_kernel_vfp_t *vfp_host;
 
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
 	/* Interrupt related fields */
 	u32 irq_lines;		/* IRQ and FIQ levels */
 
-	/* Hyp exception information */
-	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
@@ -181,4 +185,26 @@ struct kvm_one_reg;
 int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		int exception_index);
+
+static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
+				       unsigned long hyp_stack_ptr,
+				       unsigned long vector_ptr)
+{
+	unsigned long pgd_low, pgd_high;
+
+	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+	pgd_high = (pgd_ptr >> 32ULL);
+
+	/*
+	 * Call initialization code, and switch to the full blown
+	 * HYP code. The init code doesn't need to preserve these registers as
+	 * r1-r3 and r12 are already callee save according to the AAPCS.
+	 * Note that we slightly misuse the prototype by casting the pgd_low to
+	 * a void *.
+	 */
+	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
 #endif /* __ARM_KVM_HOST_H__ */
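
[Editor's note] For context, a hedged sketch of the shape of the call site (modelled loosely on arch/arm/kvm/arm.c; the stack/vector derivation and function name here are approximate, and __kvm_hyp_vector is the vector symbol declared in kvm_asm.h):

	static void cpu_init_hyp_mode_sketch(void *stack_page)
	{
		unsigned long long pgd_ptr = kvm_mmu_get_httbr();
		unsigned long stack_top = (unsigned long)stack_page + PAGE_SIZE;
		unsigned long vector_ptr = (unsigned long)__kvm_hyp_vector;

		/* the 64-bit pgd is split into two 32-bit halves inside the helper */
		__cpu_init_hyp_mode(pgd_ptr, stack_top, vector_ptr);
	}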
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b34874..970f3b5fa109 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__
 
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK	(~0UL)
+#define HYP_PAGE_OFFSET		PAGE_OFFSET
+#define KERN_TO_HYP(kva)	(kva)
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+	pte_val(*pte) = new_pte;
+	/*
+	 * flush_pmd_entry just takes a void pointer and cleans the necessary
+	 * cache entries, so we can reuse the function for ptes.
+	 */
+	flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
 	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
 	return true;
 }
 
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+	clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+	clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+	pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache.  If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and don't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		unsigned long hva = gfn_to_hva(kvm, gfn);
+		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
 #endif /* __ARM_KVM_MMU_H__ */
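
[Editor's note] A short sketch of how a stage-2 mapping path might combine these helpers (loosely following user_mem_abort() in arch/arm/kvm/mmu.c; simplified and with an invented function name):

	static void map_guest_page_sketch(struct kvm *kvm, pte_t *ptep,
					  pte_t new_pte, gfn_t gfn, bool writable)
	{
		if (writable)
			kvm_set_s2pte_writable(&new_pte);

		coherent_icache_guest_page(kvm, gfn);	/* icache vs. a new code page */
		kvm_set_pte(ptep, new_pte);		/* store + dcache clean */
	}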
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index ab97207d9cd3..343744e4809c 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@
 
 #include <linux/kernel.h>
 #include <linux/kvm.h>
-#include <linux/kvm_host.h>
 #include <linux/irqreturn.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 5cf2e979b4be..7d2c3c843801 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -30,6 +30,11 @@ struct hw_pci {
 	void		(*postinit)(void);
 	u8		(*swizzle)(struct pci_dev *dev, u8 *pin);
 	int		(*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align);
 };
 
 /*
@@ -51,6 +56,12 @@ struct pci_sys_data {
 	u8		(*swizzle)(struct pci_dev *, u8 *);
 					/* IRQ mapping			*/
 	int		(*map_irq)(const struct pci_dev *, u8, u8);
+					/* Resource alignment requirements */
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align);
 	void		*private_data;	/* platform controller private data */
 };
 
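
[Editor's note] A hedged example of a platform filling in the new hook; the alignment policy and all foo_* names are invented for illustration:

	static resource_size_t foo_align_resource(struct pci_dev *dev,
						  const struct resource *res,
						  resource_size_t start,
						  resource_size_t size,
						  resource_size_t align)
	{
		/* e.g. keep I/O BARs 1K-aligned for a quirky host bridge */
		if (res->flags & IORESOURCE_IO)
			return ALIGN(start, SZ_1K);
		return start;
	}

	static struct hw_pci foo_pci __initdata = {
		.nr_controllers	= 1,
		.map_irq	= foo_map_irq,		/* hypothetical */
		.align_resource	= foo_align_resource,
	};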
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 000000000000..0f7b7620e9a5
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,209 @@
+/*
+ * arch/arm/include/asm/mcpm.h
+ *
+ * Created by:  Nicolas Pitre, April 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MCPM_H
+#define MCPM_H
+
+/*
+ * Maximum number of possible clusters / CPUs per cluster.
+ *
+ * This should be sufficient for quite a while, while keeping the
+ * (assembly) code simpler.  When this starts to grow then we'll have
+ * to consider dynamic allocation.
+ */
+#define MAX_CPUS_PER_CLUSTER	4
+#define MAX_NR_CLUSTERS		2
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Platform specific code should use this symbol to set up secondary
+ * entry location for processors to use when released from reset.
+ */
+extern void mcpm_entry_point(void);
+
+/*
+ * This is used to indicate where the given CPU from given cluster should
+ * branch once it is ready to re-enter the kernel using ptr, or NULL if it
+ * should be gated.  A gated CPU is held in a WFE loop until its vector
+ * becomes non NULL.
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_cpu_power_up - make given CPU in given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset.  If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context.  However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return.  Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU into a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ *			to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended.  The expected residency argument is used
+ * as a hint by the platform specific backend to implement the appropriate
+ * sleep state level according to the knowledge it has on wake-up latency
+ * for the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return.  Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work.  This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+	int (*power_up)(unsigned int cpu, unsigned int cluster);
+	void (*power_down)(void);
+	void (*suspend)(u64);
+	void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+	/* individual CPU states */
+	struct {
+		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+	} cpus[MAX_CPUS_PER_CLUSTER];
+
+	/* cluster state */
+	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+	/* inbound-side state */
+	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+extern unsigned long sync_phys;	/* physical address of *mcpm_sync */
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+	void (*power_up_setup)(unsigned int affinity_level));
+
+void __init mcpm_smp_set_ops(void);
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files.  Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN		0x11
+#define CPU_COMING_UP		0x12
+#define CPU_UP			0x13
+#define CPU_GOING_DOWN		0x14
+
+#define CLUSTER_DOWN		0x21
+#define CLUSTER_UP		0x22
+#define CLUSTER_GOING_DOWN	0x23
+
+#define INBOUND_NOT_COMING_UP	0x31
+#define INBOUND_COMING_UP	0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm.
+ * We don't want to make them global to the kernel via asm-offsets.c.
+ */
+#define MCPM_SYNC_CLUSTER_CPUS	0
+#define MCPM_SYNC_CPU_SIZE	__CACHE_WRITEBACK_GRANULE
+#define MCPM_SYNC_CLUSTER_CLUSTER \
+	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
+#define MCPM_SYNC_CLUSTER_INBOUND \
+	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
+#define MCPM_SYNC_CLUSTER_SIZE \
+	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
+
+#endif
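
[Editor's note] To tie the API together, a hedged sketch of a platform backend registering itself with MCPM (the foo_* callbacks stand in for real power-controller code and are not part of this series):

	static const struct mcpm_platform_ops foo_power_ops = {
		.power_up	= foo_power_up,
		.power_down	= foo_power_down,
		.suspend	= foo_suspend,
		.powered_up	= foo_powered_up,
	};

	static int __init foo_mcpm_init(void)
	{
		int ret;

		ret = mcpm_platform_register(&foo_power_ops);
		if (!ret)
			ret = mcpm_sync_init(foo_power_up_setup);
		if (!ret)
			mcpm_smp_set_ops();	/* boot secondaries via mcpm_entry_point */
		return ret;
	}
	early_initcall(foo_mcpm_init);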
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index cddda1f41f0f..1995d1a84060 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
 #define TIF_SECCOMP		11	/* seccomp syscall filtering active */
+#define TIF_NOHZ		12	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index ab865e65a84c..a3625d141c1d 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -166,7 +166,7 @@
 # define v6wbi_always_flags	(-1UL)
 #endif
 
-#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
+#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_BARRIER | \
 				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
 				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 #define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
diff --git a/arch/arm/include/debug/uncompress.h b/arch/arm/include/debug/uncompress.h
new file mode 100644
index 000000000000..0e2949b0fae9
--- /dev/null
+++ b/arch/arm/include/debug/uncompress.h
@@ -0,0 +1,7 @@
+#ifdef CONFIG_DEBUG_UNCOMPRESS
+extern void putc(int c);
+#else
+static inline void putc(int c) {}
+#endif
+static inline void flush(void) {}
+static inline void arch_decomp_setup(void) {}
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 023bfeb367bf..c1ee007523d7 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@
 #define KVM_ARM_FIQ_spsr	fiq_regs[7]
 
 struct kvm_regs {
-	struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
-	__u32 svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
-	__u32 abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
-	__u32 und_regs[3];	/* SP_und, LR_und, SPSR_und */
-	__u32 irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
-	__u32 fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
+	struct pt_regs usr_regs;	/* R0_usr - R14_usr, PC, CPSR */
+	unsigned long svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
+	unsigned long abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
+	unsigned long und_regs[3];	/* SP_und, LR_und, SPSR_und */
+	unsigned long irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
+	unsigned long fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
 };
 
 /* Supported Processor Types */
64/* Supported Processor Types */ 64/* Supported Processor Types */