diff options
author | Sascha Hauer <s.hauer@pengutronix.de> | 2011-08-08 02:22:41 -0400 |
---|---|---|
committer | Sascha Hauer <s.hauer@pengutronix.de> | 2011-08-08 02:22:41 -0400 |
commit | 1a43f2012455a977397deffe35912fd3f3ce17b9 (patch) | |
tree | 5189f337df44e7a495fbd097cd476b0380babd8c /arch/powerpc/include/asm | |
parent | e1b96ada659431669efaf3defa997abf5db68130 (diff) | |
parent | 322a8b034003c0d46d39af85bf24fee27b902f48 (diff) |
Merge commit 'v3.1-rc1' into imx-fixes
Diffstat (limited to 'arch/powerpc/include/asm')
46 files changed, 2060 insertions, 289 deletions
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h deleted file mode 100644 index a71c9c1455a7..000000000000 --- a/arch/powerpc/include/asm/8253pit.h +++ /dev/null | |||
@@ -1,3 +0,0 @@ | |||
1 | /* | ||
2 | * 8253/8254 Programmable Interval Timer | ||
3 | */ | ||
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index b8f152ece025..e2a4c26ad377 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h | |||
@@ -181,21 +181,21 @@ static __inline__ int atomic_dec_return(atomic_t *v) | |||
181 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 181 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
182 | 182 | ||
183 | /** | 183 | /** |
184 | * atomic_add_unless - add unless the number is a given value | 184 | * __atomic_add_unless - add unless the number is a given value |
185 | * @v: pointer of type atomic_t | 185 | * @v: pointer of type atomic_t |
186 | * @a: the amount to add to v... | 186 | * @a: the amount to add to v... |
187 | * @u: ...unless v is equal to u. | 187 | * @u: ...unless v is equal to u. |
188 | * | 188 | * |
189 | * Atomically adds @a to @v, so long as it was not @u. | 189 | * Atomically adds @a to @v, so long as it was not @u. |
190 | * Returns non-zero if @v was not @u, and zero otherwise. | 190 | * Returns the old value of @v. |
191 | */ | 191 | */ |
192 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 192 | static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) |
193 | { | 193 | { |
194 | int t; | 194 | int t; |
195 | 195 | ||
196 | __asm__ __volatile__ ( | 196 | __asm__ __volatile__ ( |
197 | PPC_RELEASE_BARRIER | 197 | PPC_RELEASE_BARRIER |
198 | "1: lwarx %0,0,%1 # atomic_add_unless\n\ | 198 | "1: lwarx %0,0,%1 # __atomic_add_unless\n\ |
199 | cmpw 0,%0,%3 \n\ | 199 | cmpw 0,%0,%3 \n\ |
200 | beq- 2f \n\ | 200 | beq- 2f \n\ |
201 | add %0,%2,%0 \n" | 201 | add %0,%2,%0 \n" |
@@ -209,10 +209,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
209 | : "r" (&v->counter), "r" (a), "r" (u) | 209 | : "r" (&v->counter), "r" (a), "r" (u) |
210 | : "cc", "memory"); | 210 | : "cc", "memory"); |
211 | 211 | ||
212 | return t != u; | 212 | return t; |
213 | } | 213 | } |
214 | 214 | ||
215 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
216 | 215 | ||
217 | #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) | 216 | #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) |
218 | #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) | 217 | #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) |
@@ -444,7 +443,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) | |||
444 | * @u: ...unless v is equal to u. | 443 | * @u: ...unless v is equal to u. |
445 | * | 444 | * |
446 | * Atomically adds @a to @v, so long as it was not @u. | 445 | * Atomically adds @a to @v, so long as it was not @u. |
447 | * Returns non-zero if @v was not @u, and zero otherwise. | 446 | * Returns the old value of @v. |
448 | */ | 447 | */ |
449 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | 448 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) |
450 | { | 449 | { |
@@ -452,7 +451,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |||
452 | 451 | ||
453 | __asm__ __volatile__ ( | 452 | __asm__ __volatile__ ( |
454 | PPC_RELEASE_BARRIER | 453 | PPC_RELEASE_BARRIER |
455 | "1: ldarx %0,0,%1 # atomic_add_unless\n\ | 454 | "1: ldarx %0,0,%1 # __atomic_add_unless\n\ |
456 | cmpd 0,%0,%3 \n\ | 455 | cmpd 0,%0,%3 \n\ |
457 | beq- 2f \n\ | 456 | beq- 2f \n\ |
458 | add %0,%2,%0 \n" | 457 | add %0,%2,%0 \n" |
@@ -470,11 +469,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |||
470 | 469 | ||
471 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | 470 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) |
472 | 471 | ||
473 | #else /* __powerpc64__ */ | ||
474 | #include <asm-generic/atomic64.h> | ||
475 | |||
476 | #endif /* __powerpc64__ */ | 472 | #endif /* __powerpc64__ */ |
477 | 473 | ||
478 | #include <asm-generic/atomic-long.h> | ||
479 | #endif /* __KERNEL__ */ | 474 | #endif /* __KERNEL__ */ |
480 | #endif /* _ASM_POWERPC_ATOMIC_H_ */ | 475 | #endif /* _ASM_POWERPC_ATOMIC_H_ */ |
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index f18c6d9b9510..e137afcc10fa 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h | |||
@@ -327,10 +327,7 @@ unsigned long find_next_bit_le(const void *addr, | |||
327 | unsigned long size, unsigned long offset); | 327 | unsigned long size, unsigned long offset); |
328 | /* Bitmap functions for the ext2 filesystem */ | 328 | /* Bitmap functions for the ext2 filesystem */ |
329 | 329 | ||
330 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 330 | #include <asm-generic/bitops/ext2-atomic-setbit.h> |
331 | test_and_set_bit_le((nr), (unsigned long*)addr) | ||
332 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
333 | test_and_clear_bit_le((nr), (unsigned long*)addr) | ||
334 | 331 | ||
335 | #include <asm-generic/bitops/sched.h> | 332 | #include <asm-generic/bitops/sched.h> |
336 | 333 | ||
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index c0d842cfd012..e30442c539ce 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h | |||
@@ -179,8 +179,9 @@ extern const char *powerpc_base_platform; | |||
179 | #define LONG_ASM_CONST(x) 0 | 179 | #define LONG_ASM_CONST(x) 0 |
180 | #endif | 180 | #endif |
181 | 181 | ||
182 | 182 | #define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000) | |
183 | #define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000) | 183 | #define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000) |
184 | #define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000) | ||
184 | #define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000) | 185 | #define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000) |
185 | #define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) | 186 | #define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) |
186 | #define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) | 187 | #define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) |
@@ -401,9 +402,10 @@ extern const char *powerpc_base_platform; | |||
401 | CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \ | 402 | CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \ |
402 | CPU_FTR_STCX_CHECKS_ADDRESS) | 403 | CPU_FTR_STCX_CHECKS_ADDRESS) |
403 | #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 404 | #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
404 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 405 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ |
405 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ | 406 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ |
406 | CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS) | 407 | CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ |
408 | CPU_FTR_HVMODE) | ||
407 | #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 409 | #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
408 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 410 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
409 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 411 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
@@ -417,13 +419,13 @@ extern const char *powerpc_base_platform; | |||
417 | CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ | 419 | CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ |
418 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) | 420 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) |
419 | #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 421 | #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
420 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\ | 422 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ |
421 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 423 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
422 | CPU_FTR_COHERENT_ICACHE | \ | 424 | CPU_FTR_COHERENT_ICACHE | \ |
423 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ | 425 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ |
424 | CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ | 426 | CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ |
425 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ | 427 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ |
426 | CPU_FTR_ICSWX | CPU_FTR_CFAR) | 428 | CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE) |
427 | #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 429 | #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
428 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 430 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
429 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 431 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h index 9c70d0ca96d4..efa74ac44a35 100644 --- a/arch/powerpc/include/asm/dbell.h +++ b/arch/powerpc/include/asm/dbell.h | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <asm/ppc-opcode.h> | 18 | #include <asm/ppc-opcode.h> |
19 | 19 | ||
20 | #define PPC_DBELL_MSG_BRDCAST (0x04000000) | 20 | #define PPC_DBELL_MSG_BRDCAST (0x04000000) |
21 | #define PPC_DBELL_TYPE(x) (((x) & 0xf) << 28) | 21 | #define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36)) |
22 | enum ppc_dbell { | 22 | enum ppc_dbell { |
23 | PPC_DBELL = 0, /* doorbell */ | 23 | PPC_DBELL = 0, /* doorbell */ |
24 | PPC_DBELL_CRIT = 1, /* critical doorbell */ | 24 | PPC_DBELL_CRIT = 1, /* critical doorbell */ |
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h new file mode 100644 index 000000000000..a9e1f4f796f6 --- /dev/null +++ b/arch/powerpc/include/asm/ehv_pic.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * EHV_PIC private definitions and structure. | ||
3 | * | ||
4 | * Copyright 2008-2010 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public License | ||
7 | * version 2. This program is licensed "as is" without any warranty of any | ||
8 | * kind, whether express or implied. | ||
9 | */ | ||
10 | #ifndef __EHV_PIC_H__ | ||
11 | #define __EHV_PIC_H__ | ||
12 | |||
13 | #include <linux/irq.h> | ||
14 | |||
15 | #define NR_EHV_PIC_INTS 1024 | ||
16 | |||
17 | #define EHV_PIC_INFO(name) EHV_PIC_##name | ||
18 | |||
19 | #define EHV_PIC_VECPRI_POLARITY_NEGATIVE 0 | ||
20 | #define EHV_PIC_VECPRI_POLARITY_POSITIVE 1 | ||
21 | #define EHV_PIC_VECPRI_SENSE_EDGE 0 | ||
22 | #define EHV_PIC_VECPRI_SENSE_LEVEL 0x2 | ||
23 | #define EHV_PIC_VECPRI_POLARITY_MASK 0x1 | ||
24 | #define EHV_PIC_VECPRI_SENSE_MASK 0x2 | ||
25 | |||
26 | struct ehv_pic { | ||
27 | /* The remapper for this EHV_PIC */ | ||
28 | struct irq_host *irqhost; | ||
29 | |||
30 | /* The "linux" controller struct */ | ||
31 | struct irq_chip hc_irq; | ||
32 | |||
33 | /* core int flag */ | ||
34 | int coreint_flag; | ||
35 | }; | ||
36 | |||
37 | void ehv_pic_init(void); | ||
38 | unsigned int ehv_pic_get_irq(void); | ||
39 | |||
40 | #endif /* __EHV_PIC_H__ */ | ||
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 2b917c69ed15..3bf9cca35147 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h | |||
@@ -267,7 +267,7 @@ extern int ucache_bsize; | |||
267 | struct linux_binprm; | 267 | struct linux_binprm; |
268 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | 268 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, |
269 | int uses_interp); | 269 | int uses_interp); |
270 | #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); | 270 | #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b) |
271 | 271 | ||
272 | /* 1GB for 64bit, 8MB for 32bit */ | 272 | /* 1GB for 64bit, 8MB for 32bit */ |
273 | #define STACK_RND_MASK (is_32bit_task() ? \ | 273 | #define STACK_RND_MASK (is_32bit_task() ? \ |
@@ -298,7 +298,7 @@ do { \ | |||
298 | NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \ | 298 | NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \ |
299 | NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \ | 299 | NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \ |
300 | NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \ | 300 | NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \ |
301 | VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base) \ | 301 | VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \ |
302 | } while (0) | 302 | } while (0) |
303 | 303 | ||
304 | /* PowerPC64 relocations defined by the ABIs */ | 304 | /* PowerPC64 relocations defined by the ABIs */ |
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h index 45921672b97a..63f2a22e9954 100644 --- a/arch/powerpc/include/asm/emulated_ops.h +++ b/arch/powerpc/include/asm/emulated_ops.h | |||
@@ -18,7 +18,7 @@ | |||
18 | #ifndef _ASM_POWERPC_EMULATED_OPS_H | 18 | #ifndef _ASM_POWERPC_EMULATED_OPS_H |
19 | #define _ASM_POWERPC_EMULATED_OPS_H | 19 | #define _ASM_POWERPC_EMULATED_OPS_H |
20 | 20 | ||
21 | #include <asm/atomic.h> | 21 | #include <linux/atomic.h> |
22 | #include <linux/perf_event.h> | 22 | #include <linux/perf_event.h> |
23 | 23 | ||
24 | 24 | ||
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type); | |||
78 | #define PPC_WARN_EMULATED(type, regs) \ | 78 | #define PPC_WARN_EMULATED(type, regs) \ |
79 | do { \ | 79 | do { \ |
80 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ | 80 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ |
81 | 1, 0, regs, 0); \ | 81 | 1, regs, 0); \ |
82 | __PPC_WARN_EMULATED(type); \ | 82 | __PPC_WARN_EMULATED(type); \ |
83 | } while (0) | 83 | } while (0) |
84 | 84 | ||
85 | #define PPC_WARN_ALIGNMENT(type, regs) \ | 85 | #define PPC_WARN_ALIGNMENT(type, regs) \ |
86 | do { \ | 86 | do { \ |
87 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ | 87 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ |
88 | 1, 0, regs, regs->dar); \ | 88 | 1, regs, regs->dar); \ |
89 | __PPC_WARN_EMULATED(type); \ | 89 | __PPC_WARN_EMULATED(type); \ |
90 | } while (0) | 90 | } while (0) |
91 | 91 | ||
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h new file mode 100644 index 000000000000..f3b0c2cc9fea --- /dev/null +++ b/arch/powerpc/include/asm/epapr_hcalls.h | |||
@@ -0,0 +1,502 @@ | |||
1 | /* | ||
2 | * ePAPR hcall interface | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | * Author: Timur Tabi <timur@freescale.com> | ||
7 | * | ||
8 | * This file is provided under a dual BSD/GPL license. When using or | ||
9 | * redistributing this file, you may do so under either license. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions are met: | ||
13 | * * Redistributions of source code must retain the above copyright | ||
14 | * notice, this list of conditions and the following disclaimer. | ||
15 | * * Redistributions in binary form must reproduce the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer in the | ||
17 | * documentation and/or other materials provided with the distribution. | ||
18 | * * Neither the name of Freescale Semiconductor nor the | ||
19 | * names of its contributors may be used to endorse or promote products | ||
20 | * derived from this software without specific prior written permission. | ||
21 | * | ||
22 | * | ||
23 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
24 | * GNU General Public License ("GPL") as published by the Free Software | ||
25 | * Foundation, either version 2 of that License or (at your option) any | ||
26 | * later version. | ||
27 | * | ||
28 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
29 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
30 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
31 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
32 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
33 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
34 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
35 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
36 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
37 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
38 | */ | ||
39 | |||
40 | /* A "hypercall" is an "sc 1" instruction. This header file provides C | ||
41 | * wrapper functions for the ePAPR hypervisor interface. It is intended | ||
42 | * for use by Linux device drivers and other operating systems. | ||
43 | * | ||
44 | * The hypercalls are implemented as inline assembly, rather than assembly | ||
45 | * language functions in a .S file, for optimization. It allows | ||
46 | * the caller to issue the hypercall instruction directly, improving both | ||
47 | * performance and memory footprint. | ||
48 | */ | ||
49 | |||
50 | #ifndef _EPAPR_HCALLS_H | ||
51 | #define _EPAPR_HCALLS_H | ||
52 | |||
53 | #include <linux/types.h> | ||
54 | #include <linux/errno.h> | ||
55 | #include <asm/byteorder.h> | ||
56 | |||
57 | #define EV_BYTE_CHANNEL_SEND 1 | ||
58 | #define EV_BYTE_CHANNEL_RECEIVE 2 | ||
59 | #define EV_BYTE_CHANNEL_POLL 3 | ||
60 | #define EV_INT_SET_CONFIG 4 | ||
61 | #define EV_INT_GET_CONFIG 5 | ||
62 | #define EV_INT_SET_MASK 6 | ||
63 | #define EV_INT_GET_MASK 7 | ||
64 | #define EV_INT_IACK 9 | ||
65 | #define EV_INT_EOI 10 | ||
66 | #define EV_INT_SEND_IPI 11 | ||
67 | #define EV_INT_SET_TASK_PRIORITY 12 | ||
68 | #define EV_INT_GET_TASK_PRIORITY 13 | ||
69 | #define EV_DOORBELL_SEND 14 | ||
70 | #define EV_MSGSND 15 | ||
71 | #define EV_IDLE 16 | ||
72 | |||
73 | /* vendor ID: epapr */ | ||
74 | #define EV_LOCAL_VENDOR_ID 0 /* for private use */ | ||
75 | #define EV_EPAPR_VENDOR_ID 1 | ||
76 | #define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */ | ||
77 | #define EV_IBM_VENDOR_ID 3 /* IBM */ | ||
78 | #define EV_GHS_VENDOR_ID 4 /* Green Hills Software */ | ||
79 | #define EV_ENEA_VENDOR_ID 5 /* Enea */ | ||
80 | #define EV_WR_VENDOR_ID 6 /* Wind River Systems */ | ||
81 | #define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */ | ||
82 | #define EV_KVM_VENDOR_ID 42 /* KVM */ | ||
83 | |||
84 | /* The max number of bytes that a byte channel can send or receive per call */ | ||
85 | #define EV_BYTE_CHANNEL_MAX_BYTES 16 | ||
86 | |||
87 | |||
88 | #define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num)) | ||
89 | #define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num) | ||
90 | |||
91 | /* epapr error codes */ | ||
92 | #define EV_EPERM 1 /* Operation not permitted */ | ||
93 | #define EV_ENOENT 2 /* Entry Not Found */ | ||
94 | #define EV_EIO 3 /* I/O error occurred */ | ||
95 | #define EV_EAGAIN 4 /* The operation had insufficient | ||
96 | * resources to complete and should be | ||
97 | * retried | ||
98 | */ | ||
99 | #define EV_ENOMEM 5 /* There was insufficient memory to | ||
100 | * complete the operation */ | ||
101 | #define EV_EFAULT 6 /* Bad guest address */ | ||
102 | #define EV_ENODEV 7 /* No such device */ | ||
103 | #define EV_EINVAL 8 /* An argument supplied to the hcall | ||
104 | was out of range or invalid */ | ||
105 | #define EV_INTERNAL 9 /* An internal error occurred */ | ||
106 | #define EV_CONFIG 10 /* A configuration error was detected */ | ||
107 | #define EV_INVALID_STATE 11 /* The object is in an invalid state */ | ||
108 | #define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */ | ||
109 | #define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */ | ||
110 | |||
111 | /* | ||
112 | * Hypercall register clobber list | ||
113 | * | ||
114 | * These macros are used to define the list of clobbered registers during a | ||
115 | * hypercall. Technically, registers r0 and r3-r12 are always clobbered, | ||
116 | * but the gcc inline assembly syntax does not allow us to specify registers | ||
117 | * on the clobber list that are also on the input/output list. Therefore, | ||
118 | * the lists of clobbered registers depends on the number of register | ||
119 | * parameters ("+r" and "=r") passed to the hypercall. | ||
120 | * | ||
121 | * Each assembly block should use one of the HCALL_CLOBBERSx macros. As a | ||
122 | * general rule, 'x' is the number of parameters passed to the assembly | ||
123 | * block *except* for r11. | ||
124 | * | ||
125 | * If you're not sure, just use the smallest value of 'x' that does not | ||
126 | * generate a compilation error. Because these are static inline functions, | ||
127 | * the compiler will only check the clobber list for a function if you | ||
128 | * compile code that calls that function. | ||
129 | * | ||
130 | * r3 and r11 are not included in any clobbers list because they are always | ||
131 | * listed as output registers. | ||
132 | * | ||
133 | * XER, CTR, and LR are currently listed as clobbers because it's uncertain | ||
134 | * whether they will be clobbered. | ||
135 | * | ||
136 | * Note that r11 can be used as an output parameter. | ||
137 | */ | ||
138 | |||
139 | /* List of common clobbered registers. Do not use this macro. */ | ||
140 | #define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc" | ||
141 | |||
142 | #define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS | ||
143 | #define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10" | ||
144 | #define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9" | ||
145 | #define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8" | ||
146 | #define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7" | ||
147 | #define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6" | ||
148 | #define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5" | ||
149 | #define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4" | ||
150 | |||
151 | |||
152 | /* | ||
153 | * We use "uintptr_t" to define a register because it's guaranteed to be a | ||
154 | * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit | ||
155 | * platform. | ||
156 | * | ||
157 | * All registers are either input/output or output only. Registers that are | ||
158 | * initialized before making the hypercall are input/output. All | ||
159 | * input/output registers are represented with "+r". Output-only registers | ||
160 | * are represented with "=r". Do not specify any unused registers. The | ||
161 | * clobber list will tell the compiler that the hypercall modifies those | ||
162 | * registers, which is good enough. | ||
163 | */ | ||
164 | |||
165 | /** | ||
166 | * ev_int_set_config - configure the specified interrupt | ||
167 | * @interrupt: the interrupt number | ||
168 | * @config: configuration for this interrupt | ||
169 | * @priority: interrupt priority | ||
170 | * @destination: destination CPU number | ||
171 | * | ||
172 | * Returns 0 for success, or an error code. | ||
173 | */ | ||
174 | static inline unsigned int ev_int_set_config(unsigned int interrupt, | ||
175 | uint32_t config, unsigned int priority, uint32_t destination) | ||
176 | { | ||
177 | register uintptr_t r11 __asm__("r11"); | ||
178 | register uintptr_t r3 __asm__("r3"); | ||
179 | register uintptr_t r4 __asm__("r4"); | ||
180 | register uintptr_t r5 __asm__("r5"); | ||
181 | register uintptr_t r6 __asm__("r6"); | ||
182 | |||
183 | r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG); | ||
184 | r3 = interrupt; | ||
185 | r4 = config; | ||
186 | r5 = priority; | ||
187 | r6 = destination; | ||
188 | |||
189 | __asm__ __volatile__ ("sc 1" | ||
190 | : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6) | ||
191 | : : EV_HCALL_CLOBBERS4 | ||
192 | ); | ||
193 | |||
194 | return r3; | ||
195 | } | ||
196 | |||
197 | /** | ||
198 | * ev_int_get_config - return the config of the specified interrupt | ||
199 | * @interrupt: the interrupt number | ||
200 | * @config: returned configuration for this interrupt | ||
201 | * @priority: returned interrupt priority | ||
202 | * @destination: returned destination CPU number | ||
203 | * | ||
204 | * Returns 0 for success, or an error code. | ||
205 | */ | ||
206 | static inline unsigned int ev_int_get_config(unsigned int interrupt, | ||
207 | uint32_t *config, unsigned int *priority, uint32_t *destination) | ||
208 | { | ||
209 | register uintptr_t r11 __asm__("r11"); | ||
210 | register uintptr_t r3 __asm__("r3"); | ||
211 | register uintptr_t r4 __asm__("r4"); | ||
212 | register uintptr_t r5 __asm__("r5"); | ||
213 | register uintptr_t r6 __asm__("r6"); | ||
214 | |||
215 | r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG); | ||
216 | r3 = interrupt; | ||
217 | |||
218 | __asm__ __volatile__ ("sc 1" | ||
219 | : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6) | ||
220 | : : EV_HCALL_CLOBBERS4 | ||
221 | ); | ||
222 | |||
223 | *config = r4; | ||
224 | *priority = r5; | ||
225 | *destination = r6; | ||
226 | |||
227 | return r3; | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * ev_int_set_mask - sets the mask for the specified interrupt source | ||
232 | * @interrupt: the interrupt number | ||
233 | * @mask: 0=enable interrupts, 1=disable interrupts | ||
234 | * | ||
235 | * Returns 0 for success, or an error code. | ||
236 | */ | ||
237 | static inline unsigned int ev_int_set_mask(unsigned int interrupt, | ||
238 | unsigned int mask) | ||
239 | { | ||
240 | register uintptr_t r11 __asm__("r11"); | ||
241 | register uintptr_t r3 __asm__("r3"); | ||
242 | register uintptr_t r4 __asm__("r4"); | ||
243 | |||
244 | r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK); | ||
245 | r3 = interrupt; | ||
246 | r4 = mask; | ||
247 | |||
248 | __asm__ __volatile__ ("sc 1" | ||
249 | : "+r" (r11), "+r" (r3), "+r" (r4) | ||
250 | : : EV_HCALL_CLOBBERS2 | ||
251 | ); | ||
252 | |||
253 | return r3; | ||
254 | } | ||
255 | |||
256 | /** | ||
257 | * ev_int_get_mask - returns the mask for the specified interrupt source | ||
258 | * @interrupt: the interrupt number | ||
259 | * @mask: returned mask for this interrupt (0=enabled, 1=disabled) | ||
260 | * | ||
261 | * Returns 0 for success, or an error code. | ||
262 | */ | ||
263 | static inline unsigned int ev_int_get_mask(unsigned int interrupt, | ||
264 | unsigned int *mask) | ||
265 | { | ||
266 | register uintptr_t r11 __asm__("r11"); | ||
267 | register uintptr_t r3 __asm__("r3"); | ||
268 | register uintptr_t r4 __asm__("r4"); | ||
269 | |||
270 | r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK); | ||
271 | r3 = interrupt; | ||
272 | |||
273 | __asm__ __volatile__ ("sc 1" | ||
274 | : "+r" (r11), "+r" (r3), "=r" (r4) | ||
275 | : : EV_HCALL_CLOBBERS2 | ||
276 | ); | ||
277 | |||
278 | *mask = r4; | ||
279 | |||
280 | return r3; | ||
281 | } | ||
282 | |||
283 | /** | ||
284 | * ev_int_eoi - signal the end of interrupt processing | ||
285 | * @interrupt: the interrupt number | ||
286 | * | ||
287 | * This function signals the end of processing for the specified | ||
288 | * interrupt, which must be the interrupt currently in service. By | ||
289 | * definition, this is also the highest-priority interrupt. | ||
290 | * | ||
291 | * Returns 0 for success, or an error code. | ||
292 | */ | ||
293 | static inline unsigned int ev_int_eoi(unsigned int interrupt) | ||
294 | { | ||
295 | register uintptr_t r11 __asm__("r11"); | ||
296 | register uintptr_t r3 __asm__("r3"); | ||
297 | |||
298 | r11 = EV_HCALL_TOKEN(EV_INT_EOI); | ||
299 | r3 = interrupt; | ||
300 | |||
301 | __asm__ __volatile__ ("sc 1" | ||
302 | : "+r" (r11), "+r" (r3) | ||
303 | : : EV_HCALL_CLOBBERS1 | ||
304 | ); | ||
305 | |||
306 | return r3; | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * ev_byte_channel_send - send characters to a byte stream | ||
311 | * @handle: byte stream handle | ||
312 | * @count: (input) num of chars to send, (output) num chars sent | ||
313 | * @buffer: pointer to a 16-byte buffer | ||
314 | * | ||
315 | * @buffer must be at least 16 bytes long, because all 16 bytes will be | ||
316 | * read from memory into registers, even if count < 16. | ||
317 | * | ||
318 | * Returns 0 for success, or an error code. | ||
319 | */ | ||
320 | static inline unsigned int ev_byte_channel_send(unsigned int handle, | ||
321 | unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES]) | ||
322 | { | ||
323 | register uintptr_t r11 __asm__("r11"); | ||
324 | register uintptr_t r3 __asm__("r3"); | ||
325 | register uintptr_t r4 __asm__("r4"); | ||
326 | register uintptr_t r5 __asm__("r5"); | ||
327 | register uintptr_t r6 __asm__("r6"); | ||
328 | register uintptr_t r7 __asm__("r7"); | ||
329 | register uintptr_t r8 __asm__("r8"); | ||
330 | const uint32_t *p = (const uint32_t *) buffer; | ||
331 | |||
332 | r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND); | ||
333 | r3 = handle; | ||
334 | r4 = *count; | ||
335 | r5 = be32_to_cpu(p[0]); | ||
336 | r6 = be32_to_cpu(p[1]); | ||
337 | r7 = be32_to_cpu(p[2]); | ||
338 | r8 = be32_to_cpu(p[3]); | ||
339 | |||
340 | __asm__ __volatile__ ("sc 1" | ||
341 | : "+r" (r11), "+r" (r3), | ||
342 | "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8) | ||
343 | : : EV_HCALL_CLOBBERS6 | ||
344 | ); | ||
345 | |||
346 | *count = r4; | ||
347 | |||
348 | return r3; | ||
349 | } | ||
350 | |||
351 | /** | ||
352 | * ev_byte_channel_receive - fetch characters from a byte channel | ||
353 | * @handle: byte channel handle | ||
354 | * @count: (input) max num of chars to receive, (output) num chars received | ||
355 | * @buffer: pointer to a 16-byte buffer | ||
356 | * | ||
357 | * The size of @buffer must be at least 16 bytes, even if you request fewer | ||
358 | * than 16 characters, because we always write 16 bytes to @buffer. This is | ||
359 | * for performance reasons. | ||
360 | * | ||
361 | * Returns 0 for success, or an error code. | ||
362 | */ | ||
363 | static inline unsigned int ev_byte_channel_receive(unsigned int handle, | ||
364 | unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES]) | ||
365 | { | ||
366 | register uintptr_t r11 __asm__("r11"); | ||
367 | register uintptr_t r3 __asm__("r3"); | ||
368 | register uintptr_t r4 __asm__("r4"); | ||
369 | register uintptr_t r5 __asm__("r5"); | ||
370 | register uintptr_t r6 __asm__("r6"); | ||
371 | register uintptr_t r7 __asm__("r7"); | ||
372 | register uintptr_t r8 __asm__("r8"); | ||
373 | uint32_t *p = (uint32_t *) buffer; | ||
374 | |||
375 | r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE); | ||
376 | r3 = handle; | ||
377 | r4 = *count; | ||
378 | |||
379 | __asm__ __volatile__ ("sc 1" | ||
380 | : "+r" (r11), "+r" (r3), "+r" (r4), | ||
381 | "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8) | ||
382 | : : EV_HCALL_CLOBBERS6 | ||
383 | ); | ||
384 | |||
385 | *count = r4; | ||
386 | p[0] = cpu_to_be32(r5); | ||
387 | p[1] = cpu_to_be32(r6); | ||
388 | p[2] = cpu_to_be32(r7); | ||
389 | p[3] = cpu_to_be32(r8); | ||
390 | |||
391 | return r3; | ||
392 | } | ||
393 | |||
394 | /** | ||
395 | * ev_byte_channel_poll - returns the status of the byte channel buffers | ||
396 | * @handle: byte channel handle | ||
397 | * @rx_count: returned count of bytes in receive queue | ||
398 | * @tx_count: returned count of free space in transmit queue | ||
399 | * | ||
400 | * This function reports the amount of data in the receive queue (i.e. the | ||
401 | * number of bytes you can read), and the amount of free space in the transmit | ||
402 | * queue (i.e. the number of bytes you can write). | ||
403 | * | ||
404 | * Returns 0 for success, or an error code. | ||
405 | */ | ||
406 | static inline unsigned int ev_byte_channel_poll(unsigned int handle, | ||
407 | unsigned int *rx_count, unsigned int *tx_count) | ||
408 | { | ||
409 | register uintptr_t r11 __asm__("r11"); | ||
410 | register uintptr_t r3 __asm__("r3"); | ||
411 | register uintptr_t r4 __asm__("r4"); | ||
412 | register uintptr_t r5 __asm__("r5"); | ||
413 | |||
414 | r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL); | ||
415 | r3 = handle; | ||
416 | |||
417 | __asm__ __volatile__ ("sc 1" | ||
418 | : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5) | ||
419 | : : EV_HCALL_CLOBBERS3 | ||
420 | ); | ||
421 | |||
422 | *rx_count = r4; | ||
423 | *tx_count = r5; | ||
424 | |||
425 | return r3; | ||
426 | } | ||
427 | |||
428 | /** | ||
429 | * ev_int_iack - acknowledge an interrupt | ||
430 | * @handle: handle to the target interrupt controller | ||
431 | * @vector: returned interrupt vector | ||
432 | * | ||
433 | * If handle is zero, the function returns the next interrupt source | ||
434 | * number to be handled irrespective of the hierarchy or cascading | ||
435 | * of interrupt controllers. If non-zero, specifies a handle to the | ||
436 | * interrupt controller that is the target of the acknowledge. | ||
437 | * | ||
438 | * Returns 0 for success, or an error code. | ||
439 | */ | ||
440 | static inline unsigned int ev_int_iack(unsigned int handle, | ||
441 | unsigned int *vector) | ||
442 | { | ||
443 | register uintptr_t r11 __asm__("r11"); | ||
444 | register uintptr_t r3 __asm__("r3"); | ||
445 | register uintptr_t r4 __asm__("r4"); | ||
446 | |||
447 | r11 = EV_HCALL_TOKEN(EV_INT_IACK); | ||
448 | r3 = handle; | ||
449 | |||
450 | __asm__ __volatile__ ("sc 1" | ||
451 | : "+r" (r11), "+r" (r3), "=r" (r4) | ||
452 | : : EV_HCALL_CLOBBERS2 | ||
453 | ); | ||
454 | |||
455 | *vector = r4; | ||
456 | |||
457 | return r3; | ||
458 | } | ||
459 | |||
460 | /** | ||
461 | * ev_doorbell_send - send a doorbell to another partition | ||
462 | * @handle: doorbell send handle | ||
463 | * | ||
464 | * Returns 0 for success, or an error code. | ||
465 | */ | ||
466 | static inline unsigned int ev_doorbell_send(unsigned int handle) | ||
467 | { | ||
468 | register uintptr_t r11 __asm__("r11"); | ||
469 | register uintptr_t r3 __asm__("r3"); | ||
470 | |||
471 | r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND); | ||
472 | r3 = handle; | ||
473 | |||
474 | __asm__ __volatile__ ("sc 1" | ||
475 | : "+r" (r11), "+r" (r3) | ||
476 | : : EV_HCALL_CLOBBERS1 | ||
477 | ); | ||
478 | |||
479 | return r3; | ||
480 | } | ||
481 | |||
482 | /** | ||
483 | * ev_idle -- wait for next interrupt on this core | ||
484 | * | ||
485 | * Returns 0 for success, or an error code. | ||
486 | */ | ||
487 | static inline unsigned int ev_idle(void) | ||
488 | { | ||
489 | register uintptr_t r11 __asm__("r11"); | ||
490 | register uintptr_t r3 __asm__("r3"); | ||
491 | |||
492 | r11 = EV_HCALL_TOKEN(EV_IDLE); | ||
493 | |||
494 | __asm__ __volatile__ ("sc 1" | ||
495 | : "+r" (r11), "=r" (r3) | ||
496 | : : EV_HCALL_CLOBBERS1 | ||
497 | ); | ||
498 | |||
499 | return r3; | ||
500 | } | ||
501 | |||
502 | #endif | ||
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index 6d53f311d942..ac13addb8495 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h | |||
@@ -48,30 +48,33 @@ | |||
48 | #define EX_R14 (4 * 8) | 48 | #define EX_R14 (4 * 8) |
49 | #define EX_R15 (5 * 8) | 49 | #define EX_R15 (5 * 8) |
50 | 50 | ||
51 | /* The TLB miss exception uses different slots */ | 51 | /* |
52 | * The TLB miss exception uses different slots. | ||
53 | * | ||
54 | * The bolted variant uses only the first six fields, | ||
55 | * which in combination with pgd and kernel_pgd fits in | ||
56 | * one 64-byte cache line. | ||
57 | */ | ||
52 | 58 | ||
53 | #define EX_TLB_R10 ( 0 * 8) | 59 | #define EX_TLB_R10 ( 0 * 8) |
54 | #define EX_TLB_R11 ( 1 * 8) | 60 | #define EX_TLB_R11 ( 1 * 8) |
55 | #define EX_TLB_R12 ( 2 * 8) | 61 | #define EX_TLB_R14 ( 2 * 8) |
56 | #define EX_TLB_R13 ( 3 * 8) | 62 | #define EX_TLB_R15 ( 3 * 8) |
57 | #define EX_TLB_R14 ( 4 * 8) | 63 | #define EX_TLB_R16 ( 4 * 8) |
58 | #define EX_TLB_R15 ( 5 * 8) | 64 | #define EX_TLB_CR ( 5 * 8) |
59 | #define EX_TLB_R16 ( 6 * 8) | 65 | #define EX_TLB_R12 ( 6 * 8) |
60 | #define EX_TLB_CR ( 7 * 8) | 66 | #define EX_TLB_R13 ( 7 * 8) |
61 | #define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */ | 67 | #define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */ |
62 | #define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */ | 68 | #define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */ |
63 | #define EX_TLB_SRR0 (10 * 8) | 69 | #define EX_TLB_SRR0 (10 * 8) |
64 | #define EX_TLB_SRR1 (11 * 8) | 70 | #define EX_TLB_SRR1 (11 * 8) |
65 | #define EX_TLB_MMUCR0 (12 * 8) /* Level 0 */ | ||
66 | #define EX_TLB_MAS1 (12 * 8) /* Level 0 */ | ||
67 | #define EX_TLB_MAS2 (13 * 8) /* Level 0 */ | ||
68 | #ifdef CONFIG_BOOK3E_MMU_TLB_STATS | 71 | #ifdef CONFIG_BOOK3E_MMU_TLB_STATS |
69 | #define EX_TLB_R8 (14 * 8) | 72 | #define EX_TLB_R8 (12 * 8) |
70 | #define EX_TLB_R9 (15 * 8) | 73 | #define EX_TLB_R9 (13 * 8) |
71 | #define EX_TLB_LR (16 * 8) | 74 | #define EX_TLB_LR (14 * 8) |
72 | #define EX_TLB_SIZE (17 * 8) | 75 | #define EX_TLB_SIZE (15 * 8) |
73 | #else | 76 | #else |
74 | #define EX_TLB_SIZE (14 * 8) | 77 | #define EX_TLB_SIZE (12 * 8) |
75 | #endif | 78 | #endif |
76 | 79 | ||
77 | #define START_EXCEPTION(label) \ | 80 | #define START_EXCEPTION(label) \ |
@@ -168,6 +171,16 @@ exc_##label##_book3e: | |||
168 | ld r9,EX_TLB_R9(r12); \ | 171 | ld r9,EX_TLB_R9(r12); \ |
169 | ld r8,EX_TLB_R8(r12); \ | 172 | ld r8,EX_TLB_R8(r12); \ |
170 | mtlr r16; | 173 | mtlr r16; |
174 | #define TLB_MISS_PROLOG_STATS_BOLTED \ | ||
175 | mflr r10; \ | ||
176 | std r8,PACA_EXTLB+EX_TLB_R8(r13); \ | ||
177 | std r9,PACA_EXTLB+EX_TLB_R9(r13); \ | ||
178 | std r10,PACA_EXTLB+EX_TLB_LR(r13); | ||
179 | #define TLB_MISS_RESTORE_STATS_BOLTED \ | ||
180 | ld r16,PACA_EXTLB+EX_TLB_LR(r13); \ | ||
181 | ld r9,PACA_EXTLB+EX_TLB_R9(r13); \ | ||
182 | ld r8,PACA_EXTLB+EX_TLB_R8(r13); \ | ||
183 | mtlr r16; | ||
171 | #define TLB_MISS_STATS_D(name) \ | 184 | #define TLB_MISS_STATS_D(name) \ |
172 | addi r9,r13,MMSTAT_DSTATS+name; \ | 185 | addi r9,r13,MMSTAT_DSTATS+name; \ |
173 | bl .tlb_stat_inc; | 186 | bl .tlb_stat_inc; |
@@ -183,17 +196,20 @@ exc_##label##_book3e: | |||
183 | 61: addi r9,r13,MMSTAT_ISTATS+name; \ | 196 | 61: addi r9,r13,MMSTAT_ISTATS+name; \ |
184 | 62: bl .tlb_stat_inc; | 197 | 62: bl .tlb_stat_inc; |
185 | #define TLB_MISS_STATS_SAVE_INFO \ | 198 | #define TLB_MISS_STATS_SAVE_INFO \ |
186 | std r14,EX_TLB_ESR(r12); /* save ESR */ \ | 199 | std r14,EX_TLB_ESR(r12); /* save ESR */ |
187 | 200 | #define TLB_MISS_STATS_SAVE_INFO_BOLTED \ | |
188 | 201 | std r14,PACA_EXTLB+EX_TLB_ESR(r13); /* save ESR */ | |
189 | #else | 202 | #else |
190 | #define TLB_MISS_PROLOG_STATS | 203 | #define TLB_MISS_PROLOG_STATS |
191 | #define TLB_MISS_RESTORE_STATS | 204 | #define TLB_MISS_RESTORE_STATS |
205 | #define TLB_MISS_PROLOG_STATS_BOLTED | ||
206 | #define TLB_MISS_RESTORE_STATS_BOLTED | ||
192 | #define TLB_MISS_STATS_D(name) | 207 | #define TLB_MISS_STATS_D(name) |
193 | #define TLB_MISS_STATS_I(name) | 208 | #define TLB_MISS_STATS_I(name) |
194 | #define TLB_MISS_STATS_X(name) | 209 | #define TLB_MISS_STATS_X(name) |
195 | #define TLB_MISS_STATS_Y(name) | 210 | #define TLB_MISS_STATS_Y(name) |
196 | #define TLB_MISS_STATS_SAVE_INFO | 211 | #define TLB_MISS_STATS_SAVE_INFO |
212 | #define TLB_MISS_STATS_SAVE_INFO_BOLTED | ||
197 | #endif | 213 | #endif |
198 | 214 | ||
199 | #define SET_IVOR(vector_number, vector_offset) \ | 215 | #define SET_IVOR(vector_number, vector_offset) \ |
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index f5dfe3411f64..8057f4f6980f 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
@@ -61,19 +61,22 @@ | |||
61 | #define EXC_HV H | 61 | #define EXC_HV H |
62 | #define EXC_STD | 62 | #define EXC_STD |
63 | 63 | ||
64 | #define EXCEPTION_PROLOG_1(area) \ | 64 | #define __EXCEPTION_PROLOG_1(area, extra, vec) \ |
65 | GET_PACA(r13); \ | 65 | GET_PACA(r13); \ |
66 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ | 66 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ |
67 | std r10,area+EX_R10(r13); \ | 67 | std r10,area+EX_R10(r13); \ |
68 | std r11,area+EX_R11(r13); \ | ||
69 | std r12,area+EX_R12(r13); \ | ||
70 | BEGIN_FTR_SECTION_NESTED(66); \ | 68 | BEGIN_FTR_SECTION_NESTED(66); \ |
71 | mfspr r10,SPRN_CFAR; \ | 69 | mfspr r10,SPRN_CFAR; \ |
72 | std r10,area+EX_CFAR(r13); \ | 70 | std r10,area+EX_CFAR(r13); \ |
73 | END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ | 71 | END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ |
74 | GET_SCRATCH0(r9); \ | 72 | mfcr r9; \ |
75 | std r9,area+EX_R13(r13); \ | 73 | extra(vec); \ |
76 | mfcr r9 | 74 | std r11,area+EX_R11(r13); \ |
75 | std r12,area+EX_R12(r13); \ | ||
76 | GET_SCRATCH0(r10); \ | ||
77 | std r10,area+EX_R13(r13) | ||
78 | #define EXCEPTION_PROLOG_1(area, extra, vec) \ | ||
79 | __EXCEPTION_PROLOG_1(area, extra, vec) | ||
77 | 80 | ||
78 | #define __EXCEPTION_PROLOG_PSERIES_1(label, h) \ | 81 | #define __EXCEPTION_PROLOG_PSERIES_1(label, h) \ |
79 | ld r12,PACAKBASE(r13); /* get high part of &label */ \ | 82 | ld r12,PACAKBASE(r13); /* get high part of &label */ \ |
@@ -85,13 +88,65 @@ | |||
85 | mtspr SPRN_##h##SRR1,r10; \ | 88 | mtspr SPRN_##h##SRR1,r10; \ |
86 | h##rfid; \ | 89 | h##rfid; \ |
87 | b . /* prevent speculative execution */ | 90 | b . /* prevent speculative execution */ |
88 | #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ | 91 | #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ |
89 | __EXCEPTION_PROLOG_PSERIES_1(label, h) | 92 | __EXCEPTION_PROLOG_PSERIES_1(label, h) |
90 | 93 | ||
91 | #define EXCEPTION_PROLOG_PSERIES(area, label, h) \ | 94 | #define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \ |
92 | EXCEPTION_PROLOG_1(area); \ | 95 | EXCEPTION_PROLOG_1(area, extra, vec); \ |
93 | EXCEPTION_PROLOG_PSERIES_1(label, h); | 96 | EXCEPTION_PROLOG_PSERIES_1(label, h); |
94 | 97 | ||
98 | #define __KVMTEST(n) \ | ||
99 | lbz r10,HSTATE_IN_GUEST(r13); \ | ||
100 | cmpwi r10,0; \ | ||
101 | bne do_kvm_##n | ||
102 | |||
103 | #define __KVM_HANDLER(area, h, n) \ | ||
104 | do_kvm_##n: \ | ||
105 | ld r10,area+EX_R10(r13); \ | ||
106 | stw r9,HSTATE_SCRATCH1(r13); \ | ||
107 | ld r9,area+EX_R9(r13); \ | ||
108 | std r12,HSTATE_SCRATCH0(r13); \ | ||
109 | li r12,n; \ | ||
110 | b kvmppc_interrupt | ||
111 | |||
112 | #define __KVM_HANDLER_SKIP(area, h, n) \ | ||
113 | do_kvm_##n: \ | ||
114 | cmpwi r10,KVM_GUEST_MODE_SKIP; \ | ||
115 | ld r10,area+EX_R10(r13); \ | ||
116 | beq 89f; \ | ||
117 | stw r9,HSTATE_SCRATCH1(r13); \ | ||
118 | ld r9,area+EX_R9(r13); \ | ||
119 | std r12,HSTATE_SCRATCH0(r13); \ | ||
120 | li r12,n; \ | ||
121 | b kvmppc_interrupt; \ | ||
122 | 89: mtocrf 0x80,r9; \ | ||
123 | ld r9,area+EX_R9(r13); \ | ||
124 | b kvmppc_skip_##h##interrupt | ||
125 | |||
126 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | ||
127 | #define KVMTEST(n) __KVMTEST(n) | ||
128 | #define KVM_HANDLER(area, h, n) __KVM_HANDLER(area, h, n) | ||
129 | #define KVM_HANDLER_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n) | ||
130 | |||
131 | #else | ||
132 | #define KVMTEST(n) | ||
133 | #define KVM_HANDLER(area, h, n) | ||
134 | #define KVM_HANDLER_SKIP(area, h, n) | ||
135 | #endif | ||
136 | |||
137 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
138 | #define KVMTEST_PR(n) __KVMTEST(n) | ||
139 | #define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n) | ||
140 | #define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n) | ||
141 | |||
142 | #else | ||
143 | #define KVMTEST_PR(n) | ||
144 | #define KVM_HANDLER_PR(area, h, n) | ||
145 | #define KVM_HANDLER_PR_SKIP(area, h, n) | ||
146 | #endif | ||
147 | |||
148 | #define NOTEST(n) | ||
149 | |||
95 | /* | 150 | /* |
96 | * The common exception prolog is used for all except a few exceptions | 151 | * The common exception prolog is used for all except a few exceptions |
97 | * such as a segment miss on a kernel address. We have to be prepared | 152 | * such as a segment miss on a kernel address. We have to be prepared |
@@ -164,57 +219,58 @@ | |||
164 | .globl label##_pSeries; \ | 219 | .globl label##_pSeries; \ |
165 | label##_pSeries: \ | 220 | label##_pSeries: \ |
166 | HMT_MEDIUM; \ | 221 | HMT_MEDIUM; \ |
167 | DO_KVM vec; \ | ||
168 | SET_SCRATCH0(r13); /* save r13 */ \ | 222 | SET_SCRATCH0(r13); /* save r13 */ \ |
169 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_STD) | 223 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ |
224 | EXC_STD, KVMTEST_PR, vec) | ||
170 | 225 | ||
171 | #define STD_EXCEPTION_HV(loc, vec, label) \ | 226 | #define STD_EXCEPTION_HV(loc, vec, label) \ |
172 | . = loc; \ | 227 | . = loc; \ |
173 | .globl label##_hv; \ | 228 | .globl label##_hv; \ |
174 | label##_hv: \ | 229 | label##_hv: \ |
175 | HMT_MEDIUM; \ | 230 | HMT_MEDIUM; \ |
176 | DO_KVM vec; \ | 231 | SET_SCRATCH0(r13); /* save r13 */ \ |
177 | SET_SCRATCH0(r13); /* save r13 */ \ | 232 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ |
178 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_HV) | 233 | EXC_HV, KVMTEST, vec) |
179 | 234 | ||
180 | #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h) \ | 235 | #define __SOFTEN_TEST(h) \ |
181 | HMT_MEDIUM; \ | ||
182 | DO_KVM vec; \ | ||
183 | SET_SCRATCH0(r13); /* save r13 */ \ | ||
184 | GET_PACA(r13); \ | ||
185 | std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \ | ||
186 | std r10,PACA_EXGEN+EX_R10(r13); \ | ||
187 | lbz r10,PACASOFTIRQEN(r13); \ | 236 | lbz r10,PACASOFTIRQEN(r13); \ |
188 | mfcr r9; \ | ||
189 | cmpwi r10,0; \ | 237 | cmpwi r10,0; \ |
190 | beq masked_##h##interrupt; \ | 238 | beq masked_##h##interrupt |
191 | GET_SCRATCH0(r10); \ | 239 | #define _SOFTEN_TEST(h) __SOFTEN_TEST(h) |
192 | std r10,PACA_EXGEN+EX_R13(r13); \ | 240 | |
193 | std r11,PACA_EXGEN+EX_R11(r13); \ | 241 | #define SOFTEN_TEST_PR(vec) \ |
194 | std r12,PACA_EXGEN+EX_R12(r13); \ | 242 | KVMTEST_PR(vec); \ |
195 | ld r12,PACAKBASE(r13); /* get high part of &label */ \ | 243 | _SOFTEN_TEST(EXC_STD) |
196 | ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ | 244 | |
197 | mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ | 245 | #define SOFTEN_TEST_HV(vec) \ |
198 | LOAD_HANDLER(r12,label##_common) \ | 246 | KVMTEST(vec); \ |
199 | mtspr SPRN_##h##SRR0,r12; \ | 247 | _SOFTEN_TEST(EXC_HV) |
200 | mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ | 248 | |
201 | mtspr SPRN_##h##SRR1,r10; \ | 249 | #define SOFTEN_TEST_HV_201(vec) \ |
202 | h##rfid; \ | 250 | KVMTEST(vec); \ |
203 | b . /* prevent speculative execution */ | 251 | _SOFTEN_TEST(EXC_STD) |
204 | #define _MASKABLE_EXCEPTION_PSERIES(vec, label, h) \ | 252 | |
205 | __MASKABLE_EXCEPTION_PSERIES(vec, label, h) | 253 | #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \ |
254 | HMT_MEDIUM; \ | ||
255 | SET_SCRATCH0(r13); /* save r13 */ \ | ||
256 | __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \ | ||
257 | EXCEPTION_PROLOG_PSERIES_1(label##_common, h); | ||
258 | #define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \ | ||
259 | __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) | ||
206 | 260 | ||
207 | #define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \ | 261 | #define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \ |
208 | . = loc; \ | 262 | . = loc; \ |
209 | .globl label##_pSeries; \ | 263 | .globl label##_pSeries; \ |
210 | label##_pSeries: \ | 264 | label##_pSeries: \ |
211 | _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_STD) | 265 | _MASKABLE_EXCEPTION_PSERIES(vec, label, \ |
266 | EXC_STD, SOFTEN_TEST_PR) | ||
212 | 267 | ||
213 | #define MASKABLE_EXCEPTION_HV(loc, vec, label) \ | 268 | #define MASKABLE_EXCEPTION_HV(loc, vec, label) \ |
214 | . = loc; \ | 269 | . = loc; \ |
215 | .globl label##_hv; \ | 270 | .globl label##_hv; \ |
216 | label##_hv: \ | 271 | label##_hv: \ |
217 | _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_HV) | 272 | _MASKABLE_EXCEPTION_PSERIES(vec, label, \ |
273 | EXC_HV, SOFTEN_TEST_HV) | ||
218 | 274 | ||
219 | #ifdef CONFIG_PPC_ISERIES | 275 | #ifdef CONFIG_PPC_ISERIES |
220 | #define DISABLE_INTS \ | 276 | #define DISABLE_INTS \ |
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h new file mode 100644 index 000000000000..922d9b5fe3d5 --- /dev/null +++ b/arch/powerpc/include/asm/fsl_hcalls.h | |||
@@ -0,0 +1,655 @@ | |||
1 | /* | ||
2 | * Freescale hypervisor call interface | ||
3 | * | ||
4 | * Copyright 2008-2010 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | * Author: Timur Tabi <timur@freescale.com> | ||
7 | * | ||
8 | * This file is provided under a dual BSD/GPL license. When using or | ||
9 | * redistributing this file, you may do so under either license. | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or without | ||
12 | * modification, are permitted provided that the following conditions are met: | ||
13 | * * Redistributions of source code must retain the above copyright | ||
14 | * notice, this list of conditions and the following disclaimer. | ||
15 | * * Redistributions in binary form must reproduce the above copyright | ||
16 | * notice, this list of conditions and the following disclaimer in the | ||
17 | * documentation and/or other materials provided with the distribution. | ||
18 | * * Neither the name of Freescale Semiconductor nor the | ||
19 | * names of its contributors may be used to endorse or promote products | ||
20 | * derived from this software without specific prior written permission. | ||
21 | * | ||
22 | * | ||
23 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
24 | * GNU General Public License ("GPL") as published by the Free Software | ||
25 | * Foundation, either version 2 of that License or (at your option) any | ||
26 | * later version. | ||
27 | * | ||
28 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
29 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
30 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
31 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
32 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
33 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
34 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
35 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
36 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
37 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
38 | */ | ||
39 | |||
40 | #ifndef _FSL_HCALLS_H | ||
41 | #define _FSL_HCALLS_H | ||
42 | |||
43 | #include <linux/types.h> | ||
44 | #include <linux/errno.h> | ||
45 | #include <asm/byteorder.h> | ||
46 | #include <asm/epapr_hcalls.h> | ||
47 | |||
48 | #define FH_API_VERSION 1 | ||
49 | |||
50 | #define FH_ERR_GET_INFO 1 | ||
51 | #define FH_PARTITION_GET_DTPROP 2 | ||
52 | #define FH_PARTITION_SET_DTPROP 3 | ||
53 | #define FH_PARTITION_RESTART 4 | ||
54 | #define FH_PARTITION_GET_STATUS 5 | ||
55 | #define FH_PARTITION_START 6 | ||
56 | #define FH_PARTITION_STOP 7 | ||
57 | #define FH_PARTITION_MEMCPY 8 | ||
58 | #define FH_DMA_ENABLE 9 | ||
59 | #define FH_DMA_DISABLE 10 | ||
60 | #define FH_SEND_NMI 11 | ||
61 | #define FH_VMPIC_GET_MSIR 12 | ||
62 | #define FH_SYSTEM_RESET 13 | ||
63 | #define FH_GET_CORE_STATE 14 | ||
64 | #define FH_ENTER_NAP 15 | ||
65 | #define FH_EXIT_NAP 16 | ||
66 | #define FH_CLAIM_DEVICE 17 | ||
67 | #define FH_PARTITION_STOP_DMA 18 | ||
68 | |||
69 | /* vendor ID: Freescale Semiconductor */ | ||
70 | #define FH_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_FSL_VENDOR_ID, num) | ||
71 | |||
72 | /* | ||
73 | * We use "uintptr_t" to define a register because it's guaranteed to be a | ||
74 | * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit | ||
75 | * platform. | ||
76 | * | ||
77 | * All registers are either input/output or output only. Registers that are | ||
78 | * initialized before making the hypercall are input/output. All | ||
79 | * input/output registers are represented with "+r". Output-only registers | ||
80 | * are represented with "=r". Do not specify any unused registers. The | ||
81 | * clobber list will tell the compiler that the hypercall modifies those | ||
82 | * registers, which is good enough. | ||
83 | */ | ||
84 | |||
85 | /** | ||
86 | * fh_send_nmi - send NMI to virtual cpu(s). | ||
87 | * @vcpu_mask: send NMI to virtual cpu(s) specified by this mask. | ||
88 | * | ||
89 | * Returns 0 for success, or EINVAL for invalid vcpu_mask. | ||
90 | */ | ||
91 | static inline unsigned int fh_send_nmi(unsigned int vcpu_mask) | ||
92 | { | ||
93 | register uintptr_t r11 __asm__("r11"); | ||
94 | register uintptr_t r3 __asm__("r3"); | ||
95 | |||
96 | r11 = FH_HCALL_TOKEN(FH_SEND_NMI); | ||
97 | r3 = vcpu_mask; | ||
98 | |||
99 | __asm__ __volatile__ ("sc 1" | ||
100 | : "+r" (r11), "+r" (r3) | ||
101 | : : EV_HCALL_CLOBBERS1 | ||
102 | ); | ||
103 | |||
104 | return r3; | ||
105 | } | ||
106 | |||
107 | /* Arbitrary limits to avoid excessive memory allocation in hypervisor */ | ||
108 | #define FH_DTPROP_MAX_PATHLEN 4096 | ||
109 | #define FH_DTPROP_MAX_PROPLEN 32768 | ||
110 | |||
111 | /** | ||
112 | * fh_partiton_get_dtprop - get a property from a guest device tree. | ||
113 | * @handle: handle of partition whose device tree is to be accessed | ||
114 | * @dtpath_addr: physical address of device tree path to access | ||
115 | * @propname_addr: physical address of name of property | ||
116 | * @propvalue_addr: physical address of property value buffer | ||
117 | * @propvalue_len: length of buffer on entry, length of property on return | ||
118 | * | ||
119 | * Returns zero on success, non-zero on error. | ||
120 | */ | ||
121 | static inline unsigned int fh_partition_get_dtprop(int handle, | ||
122 | uint64_t dtpath_addr, | ||
123 | uint64_t propname_addr, | ||
124 | uint64_t propvalue_addr, | ||
125 | uint32_t *propvalue_len) | ||
126 | { | ||
127 | register uintptr_t r11 __asm__("r11"); | ||
128 | register uintptr_t r3 __asm__("r3"); | ||
129 | register uintptr_t r4 __asm__("r4"); | ||
130 | register uintptr_t r5 __asm__("r5"); | ||
131 | register uintptr_t r6 __asm__("r6"); | ||
132 | register uintptr_t r7 __asm__("r7"); | ||
133 | register uintptr_t r8 __asm__("r8"); | ||
134 | register uintptr_t r9 __asm__("r9"); | ||
135 | register uintptr_t r10 __asm__("r10"); | ||
136 | |||
137 | r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_DTPROP); | ||
138 | r3 = handle; | ||
139 | |||
140 | #ifdef CONFIG_PHYS_64BIT | ||
141 | r4 = dtpath_addr >> 32; | ||
142 | r6 = propname_addr >> 32; | ||
143 | r8 = propvalue_addr >> 32; | ||
144 | #else | ||
145 | r4 = 0; | ||
146 | r6 = 0; | ||
147 | r8 = 0; | ||
148 | #endif | ||
149 | r5 = (uint32_t)dtpath_addr; | ||
150 | r7 = (uint32_t)propname_addr; | ||
151 | r9 = (uint32_t)propvalue_addr; | ||
152 | r10 = *propvalue_len; | ||
153 | |||
154 | __asm__ __volatile__ ("sc 1" | ||
155 | : "+r" (r11), | ||
156 | "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), | ||
157 | "+r" (r8), "+r" (r9), "+r" (r10) | ||
158 | : : EV_HCALL_CLOBBERS8 | ||
159 | ); | ||
160 | |||
161 | *propvalue_len = r4; | ||
162 | return r3; | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * Set a property in a guest device tree. | ||
167 | * @handle: handle of partition whose device tree is to be accessed | ||
168 | * @dtpath_addr: physical address of device tree path to access | ||
169 | * @propname_addr: physical address of name of property | ||
170 | * @propvalue_addr: physical address of property value | ||
171 | * @propvalue_len: length of property | ||
172 | * | ||
173 | * Returns zero on success, non-zero on error. | ||
174 | */ | ||
175 | static inline unsigned int fh_partition_set_dtprop(int handle, | ||
176 | uint64_t dtpath_addr, | ||
177 | uint64_t propname_addr, | ||
178 | uint64_t propvalue_addr, | ||
179 | uint32_t propvalue_len) | ||
180 | { | ||
181 | register uintptr_t r11 __asm__("r11"); | ||
182 | register uintptr_t r3 __asm__("r3"); | ||
183 | register uintptr_t r4 __asm__("r4"); | ||
184 | register uintptr_t r6 __asm__("r6"); | ||
185 | register uintptr_t r8 __asm__("r8"); | ||
186 | register uintptr_t r5 __asm__("r5"); | ||
187 | register uintptr_t r7 __asm__("r7"); | ||
188 | register uintptr_t r9 __asm__("r9"); | ||
189 | register uintptr_t r10 __asm__("r10"); | ||
190 | |||
191 | r11 = FH_HCALL_TOKEN(FH_PARTITION_SET_DTPROP); | ||
192 | r3 = handle; | ||
193 | |||
194 | #ifdef CONFIG_PHYS_64BIT | ||
195 | r4 = dtpath_addr >> 32; | ||
196 | r6 = propname_addr >> 32; | ||
197 | r8 = propvalue_addr >> 32; | ||
198 | #else | ||
199 | r4 = 0; | ||
200 | r6 = 0; | ||
201 | r8 = 0; | ||
202 | #endif | ||
203 | r5 = (uint32_t)dtpath_addr; | ||
204 | r7 = (uint32_t)propname_addr; | ||
205 | r9 = (uint32_t)propvalue_addr; | ||
206 | r10 = propvalue_len; | ||
207 | |||
208 | __asm__ __volatile__ ("sc 1" | ||
209 | : "+r" (r11), | ||
210 | "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), | ||
211 | "+r" (r8), "+r" (r9), "+r" (r10) | ||
212 | : : EV_HCALL_CLOBBERS8 | ||
213 | ); | ||
214 | |||
215 | return r3; | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * fh_partition_restart - reboot the current partition | ||
220 | * @partition: partition ID | ||
221 | * | ||
222 | * Returns an error code if reboot failed. Does not return if it succeeds. | ||
223 | */ | ||
224 | static inline unsigned int fh_partition_restart(unsigned int partition) | ||
225 | { | ||
226 | register uintptr_t r11 __asm__("r11"); | ||
227 | register uintptr_t r3 __asm__("r3"); | ||
228 | |||
229 | r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART); | ||
230 | r3 = partition; | ||
231 | |||
232 | __asm__ __volatile__ ("sc 1" | ||
233 | : "+r" (r11), "+r" (r3) | ||
234 | : : EV_HCALL_CLOBBERS1 | ||
235 | ); | ||
236 | |||
237 | return r3; | ||
238 | } | ||
239 | |||
240 | #define FH_PARTITION_STOPPED 0 | ||
241 | #define FH_PARTITION_RUNNING 1 | ||
242 | #define FH_PARTITION_STARTING 2 | ||
243 | #define FH_PARTITION_STOPPING 3 | ||
244 | #define FH_PARTITION_PAUSING 4 | ||
245 | #define FH_PARTITION_PAUSED 5 | ||
246 | #define FH_PARTITION_RESUMING 6 | ||
247 | |||
248 | /** | ||
249 | * fh_partition_get_status - gets the status of a partition | ||
250 | * @partition: partition ID | ||
251 | * @status: returned status code | ||
252 | * | ||
253 | * Returns 0 for success, or an error code. | ||
254 | */ | ||
255 | static inline unsigned int fh_partition_get_status(unsigned int partition, | ||
256 | unsigned int *status) | ||
257 | { | ||
258 | register uintptr_t r11 __asm__("r11"); | ||
259 | register uintptr_t r3 __asm__("r3"); | ||
260 | register uintptr_t r4 __asm__("r4"); | ||
261 | |||
262 | r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS); | ||
263 | r3 = partition; | ||
264 | |||
265 | __asm__ __volatile__ ("sc 1" | ||
266 | : "+r" (r11), "+r" (r3), "=r" (r4) | ||
267 | : : EV_HCALL_CLOBBERS2 | ||
268 | ); | ||
269 | |||
270 | *status = r4; | ||
271 | |||
272 | return r3; | ||
273 | } | ||
274 | |||
275 | /** | ||
276 | * fh_partition_start - boots and starts execution of the specified partition | ||
277 | * @partition: partition ID | ||
278 | * @entry_point: guest physical address to start execution | ||
279 | * | ||
280 | * The hypervisor creates a 1-to-1 virtual/physical IMA mapping, so at boot | ||
281 | * time, guest physical address are the same as guest virtual addresses. | ||
282 | * | ||
283 | * Returns 0 for success, or an error code. | ||
284 | */ | ||
285 | static inline unsigned int fh_partition_start(unsigned int partition, | ||
286 | uint32_t entry_point, int load) | ||
287 | { | ||
288 | register uintptr_t r11 __asm__("r11"); | ||
289 | register uintptr_t r3 __asm__("r3"); | ||
290 | register uintptr_t r4 __asm__("r4"); | ||
291 | register uintptr_t r5 __asm__("r5"); | ||
292 | |||
293 | r11 = FH_HCALL_TOKEN(FH_PARTITION_START); | ||
294 | r3 = partition; | ||
295 | r4 = entry_point; | ||
296 | r5 = load; | ||
297 | |||
298 | __asm__ __volatile__ ("sc 1" | ||
299 | : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5) | ||
300 | : : EV_HCALL_CLOBBERS3 | ||
301 | ); | ||
302 | |||
303 | return r3; | ||
304 | } | ||
305 | |||
306 | /** | ||
307 | * fh_partition_stop - stops another partition | ||
308 | * @partition: partition ID | ||
309 | * | ||
310 | * Returns 0 for success, or an error code. | ||
311 | */ | ||
312 | static inline unsigned int fh_partition_stop(unsigned int partition) | ||
313 | { | ||
314 | register uintptr_t r11 __asm__("r11"); | ||
315 | register uintptr_t r3 __asm__("r3"); | ||
316 | |||
317 | r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP); | ||
318 | r3 = partition; | ||
319 | |||
320 | __asm__ __volatile__ ("sc 1" | ||
321 | : "+r" (r11), "+r" (r3) | ||
322 | : : EV_HCALL_CLOBBERS1 | ||
323 | ); | ||
324 | |||
325 | return r3; | ||
326 | } | ||
327 | |||
328 | /** | ||
329 | * struct fh_sg_list: definition of the fh_partition_memcpy S/G list | ||
330 | * @source: guest physical address to copy from | ||
331 | * @target: guest physical address to copy to | ||
332 | * @size: number of bytes to copy | ||
333 | * @reserved: reserved, must be zero | ||
334 | * | ||
335 | * The scatter/gather list for fh_partition_memcpy() is an array of these | ||
336 | * structures. The array must be guest physically contiguous. | ||
337 | * | ||
338 | * This structure must be aligned on 32-byte boundary, so that no single | ||
339 | * strucuture can span two pages. | ||
340 | */ | ||
341 | struct fh_sg_list { | ||
342 | uint64_t source; /**< guest physical address to copy from */ | ||
343 | uint64_t target; /**< guest physical address to copy to */ | ||
344 | uint64_t size; /**< number of bytes to copy */ | ||
345 | uint64_t reserved; /**< reserved, must be zero */ | ||
346 | } __attribute__ ((aligned(32))); | ||
347 | |||
348 | /** | ||
349 | * fh_partition_memcpy - copies data from one guest to another | ||
350 | * @source: the ID of the partition to copy from | ||
351 | * @target: the ID of the partition to copy to | ||
352 | * @sg_list: guest physical address of an array of &fh_sg_list structures | ||
353 | * @count: the number of entries in @sg_list | ||
354 | * | ||
355 | * Returns 0 for success, or an error code. | ||
356 | */ | ||
357 | static inline unsigned int fh_partition_memcpy(unsigned int source, | ||
358 | unsigned int target, phys_addr_t sg_list, unsigned int count) | ||
359 | { | ||
360 | register uintptr_t r11 __asm__("r11"); | ||
361 | register uintptr_t r3 __asm__("r3"); | ||
362 | register uintptr_t r4 __asm__("r4"); | ||
363 | register uintptr_t r5 __asm__("r5"); | ||
364 | register uintptr_t r6 __asm__("r6"); | ||
365 | register uintptr_t r7 __asm__("r7"); | ||
366 | |||
367 | r11 = FH_HCALL_TOKEN(FH_PARTITION_MEMCPY); | ||
368 | r3 = source; | ||
369 | r4 = target; | ||
370 | r5 = (uint32_t) sg_list; | ||
371 | |||
372 | #ifdef CONFIG_PHYS_64BIT | ||
373 | r6 = sg_list >> 32; | ||
374 | #else | ||
375 | r6 = 0; | ||
376 | #endif | ||
377 | r7 = count; | ||
378 | |||
379 | __asm__ __volatile__ ("sc 1" | ||
380 | : "+r" (r11), | ||
381 | "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7) | ||
382 | : : EV_HCALL_CLOBBERS5 | ||
383 | ); | ||
384 | |||
385 | return r3; | ||
386 | } | ||
387 | |||
388 | /** | ||
389 | * fh_dma_enable - enable DMA for the specified device | ||
390 | * @liodn: the LIODN of the I/O device for which to enable DMA | ||
391 | * | ||
392 | * Returns 0 for success, or an error code. | ||
393 | */ | ||
394 | static inline unsigned int fh_dma_enable(unsigned int liodn) | ||
395 | { | ||
396 | register uintptr_t r11 __asm__("r11"); | ||
397 | register uintptr_t r3 __asm__("r3"); | ||
398 | |||
399 | r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE); | ||
400 | r3 = liodn; | ||
401 | |||
402 | __asm__ __volatile__ ("sc 1" | ||
403 | : "+r" (r11), "+r" (r3) | ||
404 | : : EV_HCALL_CLOBBERS1 | ||
405 | ); | ||
406 | |||
407 | return r3; | ||
408 | } | ||
409 | |||
410 | /** | ||
411 | * fh_dma_disable - disable DMA for the specified device | ||
412 | * @liodn: the LIODN of the I/O device for which to disable DMA | ||
413 | * | ||
414 | * Returns 0 for success, or an error code. | ||
415 | */ | ||
416 | static inline unsigned int fh_dma_disable(unsigned int liodn) | ||
417 | { | ||
418 | register uintptr_t r11 __asm__("r11"); | ||
419 | register uintptr_t r3 __asm__("r3"); | ||
420 | |||
421 | r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE); | ||
422 | r3 = liodn; | ||
423 | |||
424 | __asm__ __volatile__ ("sc 1" | ||
425 | : "+r" (r11), "+r" (r3) | ||
426 | : : EV_HCALL_CLOBBERS1 | ||
427 | ); | ||
428 | |||
429 | return r3; | ||
430 | } | ||
431 | |||
432 | |||
433 | /** | ||
434 | * fh_vmpic_get_msir - returns the MPIC-MSI register value | ||
435 | * @interrupt: the interrupt number | ||
436 | * @msir_val: returned MPIC-MSI register value | ||
437 | * | ||
438 | * Returns 0 for success, or an error code. | ||
439 | */ | ||
440 | static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt, | ||
441 | unsigned int *msir_val) | ||
442 | { | ||
443 | register uintptr_t r11 __asm__("r11"); | ||
444 | register uintptr_t r3 __asm__("r3"); | ||
445 | register uintptr_t r4 __asm__("r4"); | ||
446 | |||
447 | r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR); | ||
448 | r3 = interrupt; | ||
449 | |||
450 | __asm__ __volatile__ ("sc 1" | ||
451 | : "+r" (r11), "+r" (r3), "=r" (r4) | ||
452 | : : EV_HCALL_CLOBBERS2 | ||
453 | ); | ||
454 | |||
455 | *msir_val = r4; | ||
456 | |||
457 | return r3; | ||
458 | } | ||
459 | |||
460 | /** | ||
461 | * fh_system_reset - reset the system | ||
462 | * | ||
463 | * Returns 0 for success, or an error code. | ||
464 | */ | ||
465 | static inline unsigned int fh_system_reset(void) | ||
466 | { | ||
467 | register uintptr_t r11 __asm__("r11"); | ||
468 | register uintptr_t r3 __asm__("r3"); | ||
469 | |||
470 | r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET); | ||
471 | |||
472 | __asm__ __volatile__ ("sc 1" | ||
473 | : "+r" (r11), "=r" (r3) | ||
474 | : : EV_HCALL_CLOBBERS1 | ||
475 | ); | ||
476 | |||
477 | return r3; | ||
478 | } | ||
479 | |||
480 | |||
481 | /** | ||
482 | * fh_err_get_info - get platform error information | ||
483 | * @queue: 0 for the guest error event queue, 1 for the global queue | ||
484 | * @bufsize: in: size of the caller's buffer; out: bytes of data returned | ||
485 | * @addr_hi: upper 32 bits of the buffer's guest physical address | ||
486 | * @addr_lo: lower 32 bits of the buffer's guest physical address | ||
487 | * @peek: nonzero to read the event without consuming it (presumed from name) | ||
488 | * (platform error data is returned in registers r4 - r11) | ||
489 | * | ||
490 | * Returns 0 for success, or an error code. | ||
491 | */ | ||
492 | static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize, | ||
493 | uint32_t addr_hi, uint32_t addr_lo, int peek) | ||
494 | { | ||
495 | register uintptr_t r11 __asm__("r11"); | ||
496 | register uintptr_t r3 __asm__("r3"); | ||
497 | register uintptr_t r4 __asm__("r4"); | ||
498 | register uintptr_t r5 __asm__("r5"); | ||
499 | register uintptr_t r6 __asm__("r6"); | ||
500 | register uintptr_t r7 __asm__("r7"); | ||
501 | |||
502 | r11 = FH_HCALL_TOKEN(FH_ERR_GET_INFO); | ||
503 | r3 = queue; | ||
504 | r4 = *bufsize; | ||
505 | r5 = addr_hi; | ||
506 | r6 = addr_lo; | ||
507 | r7 = peek; | ||
508 | |||
509 | __asm__ __volatile__ ("sc 1" | ||
510 | : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), | ||
511 | "+r" (r7) | ||
512 | : : EV_HCALL_CLOBBERS5 | ||
513 | ); | ||
514 | |||
515 | *bufsize = r4; | ||
516 | |||
517 | return r3; | ||
518 | } | ||
519 | |||
520 | |||
521 | #define FH_VCPU_RUN 0 | ||
522 | #define FH_VCPU_IDLE 1 | ||
523 | #define FH_VCPU_NAP 2 | ||
524 | |||
525 | /** | ||
526 | * fh_get_core_state - get the state of a vcpu | ||
527 | * | ||
528 | * @handle: handle of partition containing the vcpu | ||
529 | * @vcpu: vcpu number within the partition | ||
530 | * @state: out: the current state of the vcpu, see FH_VCPU_* | ||
531 | * | ||
532 | * Returns 0 for success, or an error code. | ||
533 | */ | ||
534 | static inline unsigned int fh_get_core_state(unsigned int handle, | ||
535 | unsigned int vcpu, unsigned int *state) | ||
536 | { | ||
537 | register uintptr_t r11 __asm__("r11"); | ||
538 | register uintptr_t r3 __asm__("r3"); | ||
539 | register uintptr_t r4 __asm__("r4"); | ||
540 | |||
541 | r11 = FH_HCALL_TOKEN(FH_GET_CORE_STATE); | ||
542 | r3 = handle; | ||
543 | r4 = vcpu; | ||
544 | |||
545 | __asm__ __volatile__ ("sc 1" | ||
546 | : "+r" (r11), "+r" (r3), "+r" (r4) | ||
547 | : : EV_HCALL_CLOBBERS2 | ||
548 | ); | ||
549 | |||
550 | *state = r4; | ||
551 | return r3; | ||
552 | } | ||
553 | |||
554 | /** | ||
555 | * fh_enter_nap - enter nap on a vcpu | ||
556 | * | ||
557 | * Note that though the API supports entering nap on a vcpu other | ||
558 | * than the caller, this may not be implemented and may return EINVAL. | ||
559 | * | ||
560 | * @handle: handle of partition containing the vcpu | ||
561 | * @vcpu: vcpu number within the partition | ||
562 | * | ||
563 | * Returns 0 for success, or an error code. | ||
564 | */ | ||
565 | static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu) | ||
566 | { | ||
567 | register uintptr_t r11 __asm__("r11"); | ||
568 | register uintptr_t r3 __asm__("r3"); | ||
569 | register uintptr_t r4 __asm__("r4"); | ||
570 | |||
571 | r11 = FH_HCALL_TOKEN(FH_ENTER_NAP); | ||
572 | r3 = handle; | ||
573 | r4 = vcpu; | ||
574 | |||
575 | __asm__ __volatile__ ("sc 1" | ||
576 | : "+r" (r11), "+r" (r3), "+r" (r4) | ||
577 | : : EV_HCALL_CLOBBERS2 | ||
578 | ); | ||
579 | |||
580 | return r3; | ||
581 | } | ||
582 | |||
583 | /** | ||
584 | * fh_exit_nap - exit nap on a vcpu (counterpart to fh_enter_nap()) | ||
585 | * @handle: handle of partition containing the vcpu | ||
586 | * @vcpu: vcpu number within the partition | ||
587 | * | ||
588 | * Returns 0 for success, or an error code. | ||
589 | */ | ||
590 | static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu) | ||
591 | { | ||
592 | register uintptr_t r11 __asm__("r11"); | ||
593 | register uintptr_t r3 __asm__("r3"); | ||
594 | register uintptr_t r4 __asm__("r4"); | ||
595 | |||
596 | r11 = FH_HCALL_TOKEN(FH_EXIT_NAP); | ||
597 | r3 = handle; | ||
598 | r4 = vcpu; | ||
599 | |||
600 | __asm__ __volatile__ ("sc 1" | ||
601 | : "+r" (r11), "+r" (r3), "+r" (r4) | ||
602 | : : EV_HCALL_CLOBBERS2 | ||
603 | ); | ||
604 | |||
605 | return r3; | ||
606 | } | ||
607 | /** | ||
608 | * fh_claim_device - claim a "claimable" shared device | ||
609 | * @handle: fsl,hv-device-handle of the device tree node to claim | ||
610 | * | ||
611 | * Returns 0 for success, or an error code. | ||
612 | */ | ||
613 | static inline unsigned int fh_claim_device(unsigned int handle) | ||
614 | { | ||
615 | register uintptr_t r11 __asm__("r11"); | ||
616 | register uintptr_t r3 __asm__("r3"); | ||
617 | |||
618 | r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE); | ||
619 | r3 = handle; | ||
620 | |||
621 | __asm__ __volatile__ ("sc 1" | ||
622 | : "+r" (r11), "+r" (r3) | ||
623 | : : EV_HCALL_CLOBBERS1 | ||
624 | ); | ||
625 | |||
626 | return r3; | ||
627 | } | ||
628 | |||
629 | /** | ||
630 | * fh_partition_stop_dma - run deferred DMA disabling on a partition's private devices | ||
631 | * | ||
632 | * This applies to devices which a partition owns either privately, | ||
633 | * or which are claimable and still actively owned by that partition, | ||
634 | * and which do not have the no-dma-disable property. | ||
635 | * | ||
636 | * @handle: partition (must be stopped) whose DMA is to be disabled | ||
637 | * | ||
638 | * Returns 0 for success, or an error code. | ||
639 | */ | ||
640 | static inline unsigned int fh_partition_stop_dma(unsigned int handle) | ||
641 | { | ||
642 | register uintptr_t r11 __asm__("r11"); | ||
643 | register uintptr_t r3 __asm__("r3"); | ||
644 | |||
645 | r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA); | ||
646 | r3 = handle; | ||
647 | |||
648 | __asm__ __volatile__ ("sc 1" | ||
649 | : "+r" (r11), "+r" (r3) | ||
650 | : : EV_HCALL_CLOBBERS1 | ||
651 | ); | ||
652 | |||
653 | return r3; | ||
654 | } | ||
655 | #endif | ||
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index fd8201dddd4b..1c324ff55ea8 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h | |||
@@ -29,6 +29,10 @@ | |||
29 | #define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \ | 29 | #define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \ |
30 | is a good time to retry */ | 30 | is a good time to retry */ |
31 | #define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */ | 31 | #define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */ |
32 | |||
33 | /* Internal value used in book3s_hv kvm support; not returned to guests */ | ||
34 | #define H_TOO_HARD 9999 | ||
35 | |||
32 | #define H_HARDWARE -1 /* Hardware error */ | 36 | #define H_HARDWARE -1 /* Hardware error */ |
33 | #define H_FUNCTION -2 /* Function not supported */ | 37 | #define H_FUNCTION -2 /* Function not supported */ |
34 | #define H_PRIVILEGE -3 /* Caller not privileged */ | 38 | #define H_PRIVILEGE -3 /* Caller not privileged */ |
@@ -100,6 +104,7 @@ | |||
100 | #define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE | 104 | #define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE |
101 | #define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */ | 105 | #define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */ |
102 | #define H_ANDCOND (1UL<<(63-33)) | 106 | #define H_ANDCOND (1UL<<(63-33)) |
107 | #define H_LOCAL (1UL<<(63-35)) | ||
103 | #define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ | 108 | #define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ |
104 | #define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */ | 109 | #define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */ |
105 | #define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */ | 110 | #define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */ |
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h new file mode 100644 index 000000000000..d3f64f361814 --- /dev/null +++ b/arch/powerpc/include/asm/hvsi.h | |||
@@ -0,0 +1,94 @@ | |||
1 | #ifndef _HVSI_H | ||
2 | #define _HVSI_H | ||
3 | |||
4 | #define VS_DATA_PACKET_HEADER 0xff | ||
5 | #define VS_CONTROL_PACKET_HEADER 0xfe | ||
6 | #define VS_QUERY_PACKET_HEADER 0xfd | ||
7 | #define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc | ||
8 | |||
9 | /* control verbs */ | ||
10 | #define VSV_SET_MODEM_CTL 1 /* to service processor only */ | ||
11 | #define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */ | ||
12 | #define VSV_CLOSE_PROTOCOL 3 | ||
13 | |||
14 | /* query verbs */ | ||
15 | #define VSV_SEND_VERSION_NUMBER 1 | ||
16 | #define VSV_SEND_MODEM_CTL_STATUS 2 | ||
17 | |||
18 | /* yes, these masks are not consecutive. */ | ||
19 | #define HVSI_TSDTR 0x01 | ||
20 | #define HVSI_TSCD 0x20 | ||
21 | |||
22 | #define HVSI_MAX_OUTGOING_DATA 12 | ||
23 | #define HVSI_VERSION 1 | ||
24 | |||
25 | struct hvsi_header { | ||
26 | uint8_t type; | ||
27 | uint8_t len; | ||
28 | uint16_t seqno; | ||
29 | } __attribute__((packed)); | ||
30 | |||
31 | struct hvsi_data { | ||
32 | struct hvsi_header hdr; | ||
33 | uint8_t data[HVSI_MAX_OUTGOING_DATA]; | ||
34 | } __attribute__((packed)); | ||
35 | |||
36 | struct hvsi_control { | ||
37 | struct hvsi_header hdr; | ||
38 | uint16_t verb; | ||
39 | /* optional depending on verb: */ | ||
40 | uint32_t word; | ||
41 | uint32_t mask; | ||
42 | } __attribute__((packed)); | ||
43 | |||
44 | struct hvsi_query { | ||
45 | struct hvsi_header hdr; | ||
46 | uint16_t verb; | ||
47 | } __attribute__((packed)); | ||
48 | |||
49 | struct hvsi_query_response { | ||
50 | struct hvsi_header hdr; | ||
51 | uint16_t verb; | ||
52 | uint16_t query_seqno; | ||
53 | union { | ||
54 | uint8_t version; | ||
55 | uint32_t mctrl_word; | ||
56 | } u; | ||
57 | } __attribute__((packed)); | ||
58 | |||
59 | /* hvsi lib struct definitions */ | ||
60 | #define HVSI_INBUF_SIZE 255 | ||
61 | struct tty_struct; | ||
62 | struct hvsi_priv { | ||
63 | unsigned int inbuf_len; /* data in input buffer */ | ||
64 | unsigned char inbuf[HVSI_INBUF_SIZE]; | ||
65 | unsigned int inbuf_cur; /* Cursor in input buffer */ | ||
66 | unsigned int inbuf_pktlen; /* packet length from cursor */ | ||
67 | atomic_t seqno; /* packet sequence number */ | ||
68 | unsigned int opened:1; /* driver opened */ | ||
69 | unsigned int established:1; /* protocol established */ | ||
70 | unsigned int is_console:1; /* used as a kernel console device */ | ||
71 | unsigned int mctrl_update:1; /* modem control updated */ | ||
72 | unsigned short mctrl; /* modem control */ | ||
73 | struct tty_struct *tty; /* tty structure */ | ||
74 | int (*get_chars)(uint32_t termno, char *buf, int count); /* backend read hook */ | ||
75 | int (*put_chars)(uint32_t termno, const char *buf, int count); /* backend write hook */ | ||
76 | uint32_t termno; /* terminal number passed to the hooks */ | ||
77 | }; | ||
78 | |||
79 | /* hvsi lib functions */ | ||
80 | struct hvc_struct; | ||
81 | extern void hvsilib_init(struct hvsi_priv *pv, | ||
82 | int (*get_chars)(uint32_t termno, char *buf, int count), | ||
83 | int (*put_chars)(uint32_t termno, const char *buf, | ||
84 | int count), | ||
85 | int termno, int is_console); | ||
86 | extern int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp); | ||
87 | extern void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp); | ||
88 | extern int hvsilib_read_mctrl(struct hvsi_priv *pv); | ||
89 | extern int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr); | ||
90 | extern void hvsilib_establish(struct hvsi_priv *pv); | ||
91 | extern int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count); | ||
92 | extern int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count); | ||
93 | |||
94 | #endif /* _HVSI_H */ | ||
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 1c33ec17ca36..80fd4d2b4a62 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h | |||
@@ -57,7 +57,7 @@ void hw_breakpoint_pmu_read(struct perf_event *bp); | |||
57 | extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); | 57 | extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); |
58 | 58 | ||
59 | extern struct pmu perf_ops_bp; | 59 | extern struct pmu perf_ops_bp; |
60 | extern void ptrace_triggered(struct perf_event *bp, int nmi, | 60 | extern void ptrace_triggered(struct perf_event *bp, |
61 | struct perf_sample_data *data, struct pt_regs *regs); | 61 | struct perf_sample_data *data, struct pt_regs *regs); |
62 | static inline void hw_breakpoint_disable(void) | 62 | static inline void hw_breakpoint_disable(void) |
63 | { | 63 | { |
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 1bff591f7f72..c0e1bc319e35 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/radix-tree.h> | 14 | #include <linux/radix-tree.h> |
15 | 15 | ||
16 | #include <asm/types.h> | 16 | #include <asm/types.h> |
17 | #include <asm/atomic.h> | 17 | #include <linux/atomic.h> |
18 | 18 | ||
19 | 19 | ||
20 | /* Define a way to iterate across irqs. */ | 20 | /* Define a way to iterate across irqs. */ |
@@ -330,5 +330,7 @@ extern int call_handle_irq(int irq, void *p1, | |||
330 | struct thread_info *tp, void *func); | 330 | struct thread_info *tp, void *func); |
331 | extern void do_IRQ(struct pt_regs *regs); | 331 | extern void do_IRQ(struct pt_regs *regs); |
332 | 332 | ||
333 | int irq_choose_cpu(const struct cpumask *mask); | ||
334 | |||
333 | #endif /* _ASM_IRQ_H */ | 335 | #endif /* _ASM_IRQ_H */ |
334 | #endif /* __KERNEL__ */ | 336 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h new file mode 100644 index 000000000000..1f780b95c0f0 --- /dev/null +++ b/arch/powerpc/include/asm/jump_label.h | |||
@@ -0,0 +1,47 @@ | |||
1 | #ifndef _ASM_POWERPC_JUMP_LABEL_H | ||
2 | #define _ASM_POWERPC_JUMP_LABEL_H | ||
3 | |||
4 | /* | ||
5 | * Copyright 2010 Michael Ellerman, IBM Corp. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | |||
15 | #include <asm/feature-fixups.h> | ||
16 | |||
17 | #define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG) | ||
18 | #define JUMP_LABEL_NOP_SIZE 4 | ||
19 | |||
20 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | ||
21 | { | ||
22 | asm goto("1:\n\t" | ||
23 | "nop\n\t" /* live-patched into a branch to l_yes by jump label core */ | ||
24 | ".pushsection __jump_table, \"aw\"\n\t" | ||
25 | ".align 4\n\t" | ||
26 | JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" /* record nop addr, target, key */ | ||
27 | ".popsection \n\t" | ||
28 | : : "i" (key) : : l_yes); | ||
29 | return false; /* default (unpatched nop) path */ | ||
30 | l_yes: | ||
31 | return true; | ||
32 | } | ||
33 | |||
34 | #ifdef CONFIG_PPC64 | ||
35 | typedef u64 jump_label_t; | ||
36 | #else | ||
37 | typedef u32 jump_label_t; | ||
38 | #endif | ||
39 | |||
40 | struct jump_entry { | ||
41 | jump_label_t code; /* address of the nop (label 1b in arch_static_branch) */ | ||
42 | jump_label_t target; /* address of the l_yes branch target */ | ||
43 | jump_label_t key; /* associated struct jump_label_key */ | ||
44 | jump_label_t pad; /* padding; not emitted by arch_static_branch -- TODO confirm origin */ | ||
45 | }; | ||
46 | |||
47 | #endif /* _ASM_POWERPC_JUMP_LABEL_H */ | ||
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h index d2ca5ed3877b..a4f6c85431f8 100644 --- a/arch/powerpc/include/asm/kvm.h +++ b/arch/powerpc/include/asm/kvm.h | |||
@@ -22,6 +22,10 @@ | |||
22 | 22 | ||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | 24 | ||
25 | /* Select powerpc specific features in <linux/kvm.h> */ | ||
26 | #define __KVM_HAVE_SPAPR_TCE | ||
27 | #define __KVM_HAVE_PPC_SMT | ||
28 | |||
25 | struct kvm_regs { | 29 | struct kvm_regs { |
26 | __u64 pc; | 30 | __u64 pc; |
27 | __u64 cr; | 31 | __u64 cr; |
@@ -272,4 +276,15 @@ struct kvm_guest_debug_arch { | |||
272 | #define KVM_INTERRUPT_UNSET -2U | 276 | #define KVM_INTERRUPT_UNSET -2U |
273 | #define KVM_INTERRUPT_SET_LEVEL -3U | 277 | #define KVM_INTERRUPT_SET_LEVEL -3U |
274 | 278 | ||
279 | /* for KVM_CAP_SPAPR_TCE */ | ||
280 | struct kvm_create_spapr_tce { | ||
281 | __u64 liobn; | ||
282 | __u32 window_size; | ||
283 | }; | ||
284 | |||
285 | /* for KVM_ALLOCATE_RMA */ | ||
286 | struct kvm_allocate_rma { | ||
287 | __u64 rma_size; | ||
288 | }; | ||
289 | |||
275 | #endif /* __LINUX_KVM_POWERPC_H */ | 290 | #endif /* __LINUX_KVM_POWERPC_H */ |
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 0951b17f4eb5..7b1f0e0fc653 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h | |||
@@ -64,8 +64,12 @@ | |||
64 | #define BOOK3S_INTERRUPT_PROGRAM 0x700 | 64 | #define BOOK3S_INTERRUPT_PROGRAM 0x700 |
65 | #define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 | 65 | #define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 |
66 | #define BOOK3S_INTERRUPT_DECREMENTER 0x900 | 66 | #define BOOK3S_INTERRUPT_DECREMENTER 0x900 |
67 | #define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980 | ||
67 | #define BOOK3S_INTERRUPT_SYSCALL 0xc00 | 68 | #define BOOK3S_INTERRUPT_SYSCALL 0xc00 |
68 | #define BOOK3S_INTERRUPT_TRACE 0xd00 | 69 | #define BOOK3S_INTERRUPT_TRACE 0xd00 |
70 | #define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00 | ||
71 | #define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20 | ||
72 | #define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40 | ||
69 | #define BOOK3S_INTERRUPT_PERFMON 0xf00 | 73 | #define BOOK3S_INTERRUPT_PERFMON 0xf00 |
70 | #define BOOK3S_INTERRUPT_ALTIVEC 0xf20 | 74 | #define BOOK3S_INTERRUPT_ALTIVEC 0xf20 |
71 | #define BOOK3S_INTERRUPT_VSX 0xf40 | 75 | #define BOOK3S_INTERRUPT_VSX 0xf40 |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index d62e703f1214..98da010252a3 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
@@ -24,20 +24,6 @@ | |||
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <asm/kvm_book3s_asm.h> | 25 | #include <asm/kvm_book3s_asm.h> |
26 | 26 | ||
27 | struct kvmppc_slb { | ||
28 | u64 esid; | ||
29 | u64 vsid; | ||
30 | u64 orige; | ||
31 | u64 origv; | ||
32 | bool valid : 1; | ||
33 | bool Ks : 1; | ||
34 | bool Kp : 1; | ||
35 | bool nx : 1; | ||
36 | bool large : 1; /* PTEs are 16MB */ | ||
37 | bool tb : 1; /* 1TB segment */ | ||
38 | bool class : 1; | ||
39 | }; | ||
40 | |||
41 | struct kvmppc_bat { | 27 | struct kvmppc_bat { |
42 | u64 raw; | 28 | u64 raw; |
43 | u32 bepi; | 29 | u32 bepi; |
@@ -67,11 +53,22 @@ struct kvmppc_sid_map { | |||
67 | #define VSID_POOL_SIZE (SID_CONTEXTS * 16) | 53 | #define VSID_POOL_SIZE (SID_CONTEXTS * 16) |
68 | #endif | 54 | #endif |
69 | 55 | ||
56 | struct hpte_cache { | ||
57 | struct hlist_node list_pte; | ||
58 | struct hlist_node list_pte_long; | ||
59 | struct hlist_node list_vpte; | ||
60 | struct hlist_node list_vpte_long; | ||
61 | struct rcu_head rcu_head; | ||
62 | u64 host_va; | ||
63 | u64 pfn; | ||
64 | ulong slot; | ||
65 | struct kvmppc_pte pte; | ||
66 | }; | ||
67 | |||
70 | struct kvmppc_vcpu_book3s { | 68 | struct kvmppc_vcpu_book3s { |
71 | struct kvm_vcpu vcpu; | 69 | struct kvm_vcpu vcpu; |
72 | struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; | 70 | struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; |
73 | struct kvmppc_sid_map sid_map[SID_MAP_NUM]; | 71 | struct kvmppc_sid_map sid_map[SID_MAP_NUM]; |
74 | struct kvmppc_slb slb[64]; | ||
75 | struct { | 72 | struct { |
76 | u64 esid; | 73 | u64 esid; |
77 | u64 vsid; | 74 | u64 vsid; |
@@ -81,7 +78,6 @@ struct kvmppc_vcpu_book3s { | |||
81 | struct kvmppc_bat dbat[8]; | 78 | struct kvmppc_bat dbat[8]; |
82 | u64 hid[6]; | 79 | u64 hid[6]; |
83 | u64 gqr[8]; | 80 | u64 gqr[8]; |
84 | int slb_nr; | ||
85 | u64 sdr1; | 81 | u64 sdr1; |
86 | u64 hior; | 82 | u64 hior; |
87 | u64 msr_mask; | 83 | u64 msr_mask; |
@@ -93,7 +89,13 @@ struct kvmppc_vcpu_book3s { | |||
93 | u64 vsid_max; | 89 | u64 vsid_max; |
94 | #endif | 90 | #endif |
95 | int context_id[SID_CONTEXTS]; | 91 | int context_id[SID_CONTEXTS]; |
96 | ulong prog_flags; /* flags to inject when giving a 700 trap */ | 92 | |
93 | struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; | ||
94 | struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; | ||
95 | struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; | ||
96 | struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; | ||
97 | int hpte_cache_count; | ||
98 | spinlock_t mmu_lock; | ||
97 | }; | 99 | }; |
98 | 100 | ||
99 | #define CONTEXT_HOST 0 | 101 | #define CONTEXT_HOST 0 |
@@ -110,8 +112,10 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask) | |||
110 | extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask); | 112 | extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask); |
111 | extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end); | 113 | extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end); |
112 | extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr); | 114 | extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr); |
115 | extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr); | ||
113 | extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu); | 116 | extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu); |
114 | extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu); | 117 | extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu); |
118 | extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu); | ||
115 | extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); | 119 | extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); |
116 | extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); | 120 | extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); |
117 | extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); | 121 | extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); |
@@ -123,19 +127,22 @@ extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu); | |||
123 | extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte); | 127 | extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte); |
124 | extern int kvmppc_mmu_hpte_sysinit(void); | 128 | extern int kvmppc_mmu_hpte_sysinit(void); |
125 | extern void kvmppc_mmu_hpte_sysexit(void); | 129 | extern void kvmppc_mmu_hpte_sysexit(void); |
130 | extern int kvmppc_mmu_hv_init(void); | ||
126 | 131 | ||
127 | extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); | 132 | extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); |
128 | extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); | 133 | extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); |
129 | extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); | 134 | extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); |
135 | extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags); | ||
130 | extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, | 136 | extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, |
131 | bool upper, u32 val); | 137 | bool upper, u32 val); |
132 | extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); | 138 | extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); |
133 | extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); | 139 | extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); |
134 | extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); | 140 | extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); |
135 | 141 | ||
136 | extern ulong kvmppc_trampoline_lowmem; | 142 | extern void kvmppc_handler_lowmem_trampoline(void); |
137 | extern ulong kvmppc_trampoline_enter; | 143 | extern void kvmppc_handler_trampoline_enter(void); |
138 | extern void kvmppc_rmcall(ulong srr0, ulong srr1); | 144 | extern void kvmppc_rmcall(ulong srr0, ulong srr1); |
145 | extern void kvmppc_hv_entry_trampoline(void); | ||
139 | extern void kvmppc_load_up_fpu(void); | 146 | extern void kvmppc_load_up_fpu(void); |
140 | extern void kvmppc_load_up_altivec(void); | 147 | extern void kvmppc_load_up_altivec(void); |
141 | extern void kvmppc_load_up_vsx(void); | 148 | extern void kvmppc_load_up_vsx(void); |
@@ -147,15 +154,32 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) | |||
147 | return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu); | 154 | return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu); |
148 | } | 155 | } |
149 | 156 | ||
150 | static inline ulong dsisr(void) | 157 | extern void kvm_return_point(void); |
158 | |||
159 | /* Also add subarch specific defines */ | ||
160 | |||
161 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER | ||
162 | #include <asm/kvm_book3s_32.h> | ||
163 | #endif | ||
164 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | ||
165 | #include <asm/kvm_book3s_64.h> | ||
166 | #endif | ||
167 | |||
168 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
169 | |||
170 | static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) | ||
151 | { | 171 | { |
152 | ulong r; | 172 | return to_book3s(vcpu)->hior; |
153 | asm ( "mfdsisr %0 " : "=r" (r) ); | ||
154 | return r; | ||
155 | } | 173 | } |
156 | 174 | ||
157 | extern void kvm_return_point(void); | 175 | static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, |
158 | static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu); | 176 | unsigned long pending_now, unsigned long old_pending) |
177 | { | ||
178 | if (pending_now) | ||
179 | vcpu->arch.shared->int_pending = 1; | ||
180 | else if (old_pending) | ||
181 | vcpu->arch.shared->int_pending = 0; | ||
182 | } | ||
159 | 183 | ||
160 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | 184 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) |
161 | { | 185 | { |
@@ -244,6 +268,120 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | |||
244 | return to_svcpu(vcpu)->fault_dar; | 268 | return to_svcpu(vcpu)->fault_dar; |
245 | } | 269 | } |
246 | 270 | ||
271 | static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) | ||
272 | { | ||
273 | ulong crit_raw = vcpu->arch.shared->critical; | ||
274 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); | ||
275 | bool crit; | ||
276 | |||
277 | /* Truncate crit indicators in 32 bit mode */ | ||
278 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | ||
279 | crit_raw &= 0xffffffff; | ||
280 | crit_r1 &= 0xffffffff; | ||
281 | } | ||
282 | |||
283 | /* Critical section when crit == r1 */ | ||
284 | crit = (crit_raw == crit_r1); | ||
285 | /* ... and we're in supervisor mode */ | ||
286 | crit = crit && !(vcpu->arch.shared->msr & MSR_PR); | ||
287 | |||
288 | return crit; | ||
289 | } | ||
290 | #else /* CONFIG_KVM_BOOK3S_PR */ | ||
291 | |||
292 | static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) | ||
293 | { | ||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, | ||
298 | unsigned long pending_now, unsigned long old_pending) | ||
299 | { | ||
300 | } | ||
301 | |||
302 | static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) | ||
303 | { | ||
304 | vcpu->arch.gpr[num] = val; | ||
305 | } | ||
306 | |||
307 | static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) | ||
308 | { | ||
309 | return vcpu->arch.gpr[num]; | ||
310 | } | ||
311 | |||
312 | static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) | ||
313 | { | ||
314 | vcpu->arch.cr = val; | ||
315 | } | ||
316 | |||
317 | static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) | ||
318 | { | ||
319 | return vcpu->arch.cr; | ||
320 | } | ||
321 | |||
322 | static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) | ||
323 | { | ||
324 | vcpu->arch.xer = val; | ||
325 | } | ||
326 | |||
327 | static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) | ||
328 | { | ||
329 | return vcpu->arch.xer; | ||
330 | } | ||
331 | |||
332 | static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) | ||
333 | { | ||
334 | vcpu->arch.ctr = val; | ||
335 | } | ||
336 | |||
337 | static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) | ||
338 | { | ||
339 | return vcpu->arch.ctr; | ||
340 | } | ||
341 | |||
342 | static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) | ||
343 | { | ||
344 | vcpu->arch.lr = val; | ||
345 | } | ||
346 | |||
347 | static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) | ||
348 | { | ||
349 | return vcpu->arch.lr; | ||
350 | } | ||
351 | |||
352 | static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) | ||
353 | { | ||
354 | vcpu->arch.pc = val; | ||
355 | } | ||
356 | |||
357 | static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) | ||
358 | { | ||
359 | return vcpu->arch.pc; | ||
360 | } | ||
361 | |||
362 | static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) | ||
363 | { | ||
364 | ulong pc = kvmppc_get_pc(vcpu); | ||
365 | |||
366 | /* Load the instruction manually if it failed to do so in the | ||
367 | * exit path */ | ||
368 | if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) | ||
369 | kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); | ||
370 | |||
371 | return vcpu->arch.last_inst; | ||
372 | } | ||
373 | |||
374 | static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | ||
375 | { | ||
376 | return vcpu->arch.fault_dar; | ||
377 | } | ||
378 | |||
379 | static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) | ||
380 | { | ||
381 | return false; | ||
382 | } | ||
383 | #endif | ||
384 | |||
247 | /* Magic register values loaded into r3 and r4 before the 'sc' assembly | 385 | /* Magic register values loaded into r3 and r4 before the 'sc' assembly |
248 | * instruction for the OSI hypercalls */ | 386 | * instruction for the OSI hypercalls */ |
249 | #define OSI_SC_MAGIC_R3 0x113724FA | 387 | #define OSI_SC_MAGIC_R3 0x113724FA |
@@ -251,12 +389,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | |||
251 | 389 | ||
252 | #define INS_DCBZ 0x7c0007ec | 390 | #define INS_DCBZ 0x7c0007ec |
253 | 391 | ||
254 | /* Also add subarch specific defines */ | ||
255 | |||
256 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
257 | #include <asm/kvm_book3s_32.h> | ||
258 | #else | ||
259 | #include <asm/kvm_book3s_64.h> | ||
260 | #endif | ||
261 | |||
262 | #endif /* __ASM_KVM_BOOK3S_H__ */ | 392 | #endif /* __ASM_KVM_BOOK3S_H__ */ |
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 4cadd612d575..e43fe42b9875 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h | |||
@@ -20,9 +20,13 @@ | |||
20 | #ifndef __ASM_KVM_BOOK3S_64_H__ | 20 | #ifndef __ASM_KVM_BOOK3S_64_H__ |
21 | #define __ASM_KVM_BOOK3S_64_H__ | 21 | #define __ASM_KVM_BOOK3S_64_H__ |
22 | 22 | ||
23 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
23 | static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) | 24 | static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) |
24 | { | 25 | { |
25 | return &get_paca()->shadow_vcpu; | 26 | return &get_paca()->shadow_vcpu; |
26 | } | 27 | } |
28 | #endif | ||
29 | |||
30 | #define SPAPR_TCE_SHIFT 12 | ||
27 | 31 | ||
28 | #endif /* __ASM_KVM_BOOK3S_64_H__ */ | 32 | #endif /* __ASM_KVM_BOOK3S_64_H__ */ |
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index d5a8a3861635..ef7b3688c3b6 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h | |||
@@ -60,6 +60,36 @@ kvmppc_resume_\intno: | |||
60 | 60 | ||
61 | #else /*__ASSEMBLY__ */ | 61 | #else /*__ASSEMBLY__ */ |
62 | 62 | ||
63 | /* | ||
64 | * This struct goes in the PACA on 64-bit processors. It is used | ||
65 | * to store host state that needs to be saved when we enter a guest | ||
66 | * and restored when we exit, but isn't specific to any particular | ||
67 | * guest or vcpu. It also has some scratch fields used by the guest | ||
68 | * exit code. | ||
69 | */ | ||
70 | struct kvmppc_host_state { | ||
71 | ulong host_r1; | ||
72 | ulong host_r2; | ||
73 | ulong host_msr; | ||
74 | ulong vmhandler; | ||
75 | ulong scratch0; | ||
76 | ulong scratch1; | ||
77 | u8 in_guest; | ||
78 | |||
79 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
80 | struct kvm_vcpu *kvm_vcpu; | ||
81 | struct kvmppc_vcore *kvm_vcore; | ||
82 | unsigned long xics_phys; | ||
83 | u64 dabr; | ||
84 | u64 host_mmcr[3]; | ||
85 | u32 host_pmc[8]; | ||
86 | u64 host_purr; | ||
87 | u64 host_spurr; | ||
88 | u64 host_dscr; | ||
89 | u64 dec_expires; | ||
90 | #endif | ||
91 | }; | ||
92 | |||
63 | struct kvmppc_book3s_shadow_vcpu { | 93 | struct kvmppc_book3s_shadow_vcpu { |
64 | ulong gpr[14]; | 94 | ulong gpr[14]; |
65 | u32 cr; | 95 | u32 cr; |
@@ -73,17 +103,12 @@ struct kvmppc_book3s_shadow_vcpu { | |||
73 | ulong shadow_srr1; | 103 | ulong shadow_srr1; |
74 | ulong fault_dar; | 104 | ulong fault_dar; |
75 | 105 | ||
76 | ulong host_r1; | ||
77 | ulong host_r2; | ||
78 | ulong handler; | ||
79 | ulong scratch0; | ||
80 | ulong scratch1; | ||
81 | ulong vmhandler; | ||
82 | u8 in_guest; | ||
83 | |||
84 | #ifdef CONFIG_PPC_BOOK3S_32 | 106 | #ifdef CONFIG_PPC_BOOK3S_32 |
85 | u32 sr[16]; /* Guest SRs */ | 107 | u32 sr[16]; /* Guest SRs */ |
108 | |||
109 | struct kvmppc_host_state hstate; | ||
86 | #endif | 110 | #endif |
111 | |||
87 | #ifdef CONFIG_PPC_BOOK3S_64 | 112 | #ifdef CONFIG_PPC_BOOK3S_64 |
88 | u8 slb_max; /* highest used guest slb entry */ | 113 | u8 slb_max; /* highest used guest slb entry */ |
89 | struct { | 114 | struct { |
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index 9c9ba3d59b1b..a90e09188777 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h | |||
@@ -93,4 +93,8 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) | |||
93 | return vcpu->arch.fault_dear; | 93 | return vcpu->arch.fault_dear; |
94 | } | 94 | } |
95 | 95 | ||
96 | static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu) | ||
97 | { | ||
98 | return vcpu->arch.shared->msr; | ||
99 | } | ||
96 | #endif /* __ASM_KVM_BOOKE_H__ */ | 100 | #endif /* __ASM_KVM_BOOKE_H__ */ |
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h index 7a2a565f88c4..adbfca9dd100 100644 --- a/arch/powerpc/include/asm/kvm_e500.h +++ b/arch/powerpc/include/asm/kvm_e500.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. | 2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * Author: Yu Liu, <yu.liu@freescale.com> | 4 | * Author: Yu Liu, <yu.liu@freescale.com> |
5 | * | 5 | * |
@@ -29,17 +29,25 @@ struct tlbe{ | |||
29 | u32 mas7; | 29 | u32 mas7; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | #define E500_TLB_VALID 1 | ||
33 | #define E500_TLB_DIRTY 2 | ||
34 | |||
35 | struct tlbe_priv { | ||
36 | pfn_t pfn; | ||
37 | unsigned int flags; /* E500_TLB_* */ | ||
38 | }; | ||
39 | |||
40 | struct vcpu_id_table; | ||
41 | |||
32 | struct kvmppc_vcpu_e500 { | 42 | struct kvmppc_vcpu_e500 { |
33 | /* Unmodified copy of the guest's TLB. */ | 43 | /* Unmodified copy of the guest's TLB. */ |
34 | struct tlbe *guest_tlb[E500_TLB_NUM]; | 44 | struct tlbe *gtlb_arch[E500_TLB_NUM]; |
35 | /* TLB that's actually used when the guest is running. */ | ||
36 | struct tlbe *shadow_tlb[E500_TLB_NUM]; | ||
37 | /* Pages which are referenced in the shadow TLB. */ | ||
38 | struct page **shadow_pages[E500_TLB_NUM]; | ||
39 | 45 | ||
40 | unsigned int guest_tlb_size[E500_TLB_NUM]; | 46 | /* KVM internal information associated with each guest TLB entry */ |
41 | unsigned int shadow_tlb_size[E500_TLB_NUM]; | 47 | struct tlbe_priv *gtlb_priv[E500_TLB_NUM]; |
42 | unsigned int guest_tlb_nv[E500_TLB_NUM]; | 48 | |
49 | unsigned int gtlb_size[E500_TLB_NUM]; | ||
50 | unsigned int gtlb_nv[E500_TLB_NUM]; | ||
43 | 51 | ||
44 | u32 host_pid[E500_PID_NUM]; | 52 | u32 host_pid[E500_PID_NUM]; |
45 | u32 pid[E500_PID_NUM]; | 53 | u32 pid[E500_PID_NUM]; |
@@ -53,6 +61,10 @@ struct kvmppc_vcpu_e500 { | |||
53 | u32 mas5; | 61 | u32 mas5; |
54 | u32 mas6; | 62 | u32 mas6; |
55 | u32 mas7; | 63 | u32 mas7; |
64 | |||
65 | /* vcpu id table */ | ||
66 | struct vcpu_id_table *idt; | ||
67 | |||
56 | u32 l1csr0; | 68 | u32 l1csr0; |
57 | u32 l1csr1; | 69 | u32 l1csr1; |
58 | u32 hid0; | 70 | u32 hid0; |
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 186f150b9b89..cc22b282d755 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h | |||
@@ -25,15 +25,23 @@ | |||
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/kvm_types.h> | 27 | #include <linux/kvm_types.h> |
28 | #include <linux/threads.h> | ||
29 | #include <linux/spinlock.h> | ||
28 | #include <linux/kvm_para.h> | 30 | #include <linux/kvm_para.h> |
31 | #include <linux/list.h> | ||
32 | #include <linux/atomic.h> | ||
29 | #include <asm/kvm_asm.h> | 33 | #include <asm/kvm_asm.h> |
34 | #include <asm/processor.h> | ||
30 | 35 | ||
31 | #define KVM_MAX_VCPUS 1 | 36 | #define KVM_MAX_VCPUS NR_CPUS |
37 | #define KVM_MAX_VCORES NR_CPUS | ||
32 | #define KVM_MEMORY_SLOTS 32 | 38 | #define KVM_MEMORY_SLOTS 32 |
33 | /* memory slots that does not exposed to userspace */ | 39 | /* memory slots that does not exposed to userspace */ |
34 | #define KVM_PRIVATE_MEM_SLOTS 4 | 40 | #define KVM_PRIVATE_MEM_SLOTS 4 |
35 | 41 | ||
42 | #ifdef CONFIG_KVM_MMIO | ||
36 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 43 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
44 | #endif | ||
37 | 45 | ||
38 | /* We don't currently support large pages. */ | 46 | /* We don't currently support large pages. */ |
39 | #define KVM_HPAGE_GFN_SHIFT(x) 0 | 47 | #define KVM_HPAGE_GFN_SHIFT(x) 0 |
@@ -57,6 +65,10 @@ struct kvm; | |||
57 | struct kvm_run; | 65 | struct kvm_run; |
58 | struct kvm_vcpu; | 66 | struct kvm_vcpu; |
59 | 67 | ||
68 | struct lppaca; | ||
69 | struct slb_shadow; | ||
70 | struct dtl; | ||
71 | |||
60 | struct kvm_vm_stat { | 72 | struct kvm_vm_stat { |
61 | u32 remote_tlb_flush; | 73 | u32 remote_tlb_flush; |
62 | }; | 74 | }; |
@@ -133,9 +145,74 @@ struct kvmppc_exit_timing { | |||
133 | }; | 145 | }; |
134 | }; | 146 | }; |
135 | 147 | ||
148 | struct kvmppc_pginfo { | ||
149 | unsigned long pfn; | ||
150 | atomic_t refcnt; | ||
151 | }; | ||
152 | |||
153 | struct kvmppc_spapr_tce_table { | ||
154 | struct list_head list; | ||
155 | struct kvm *kvm; | ||
156 | u64 liobn; | ||
157 | u32 window_size; | ||
158 | struct page *pages[0]; | ||
159 | }; | ||
160 | |||
161 | struct kvmppc_rma_info { | ||
162 | void *base_virt; | ||
163 | unsigned long base_pfn; | ||
164 | unsigned long npages; | ||
165 | struct list_head list; | ||
166 | atomic_t use_count; | ||
167 | }; | ||
168 | |||
136 | struct kvm_arch { | 169 | struct kvm_arch { |
170 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
171 | unsigned long hpt_virt; | ||
172 | unsigned long ram_npages; | ||
173 | unsigned long ram_psize; | ||
174 | unsigned long ram_porder; | ||
175 | struct kvmppc_pginfo *ram_pginfo; | ||
176 | unsigned int lpid; | ||
177 | unsigned int host_lpid; | ||
178 | unsigned long host_lpcr; | ||
179 | unsigned long sdr1; | ||
180 | unsigned long host_sdr1; | ||
181 | int tlbie_lock; | ||
182 | int n_rma_pages; | ||
183 | unsigned long lpcr; | ||
184 | unsigned long rmor; | ||
185 | struct kvmppc_rma_info *rma; | ||
186 | struct list_head spapr_tce_tables; | ||
187 | unsigned short last_vcpu[NR_CPUS]; | ||
188 | struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; | ||
189 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | ||
137 | }; | 190 | }; |
138 | 191 | ||
192 | /* | ||
193 | * Struct for a virtual core. | ||
194 | * Note: entry_exit_count combines an entry count in the bottom 8 bits | ||
195 | * and an exit count in the next 8 bits. This is so that we can | ||
196 | * atomically increment the entry count iff the exit count is 0 | ||
197 | * without taking the lock. | ||
198 | */ | ||
199 | struct kvmppc_vcore { | ||
200 | int n_runnable; | ||
201 | int n_blocked; | ||
202 | int num_threads; | ||
203 | int entry_exit_count; | ||
204 | int n_woken; | ||
205 | int nap_count; | ||
206 | u16 pcpu; | ||
207 | u8 vcore_running; | ||
208 | u8 in_guest; | ||
209 | struct list_head runnable_threads; | ||
210 | spinlock_t lock; | ||
211 | }; | ||
212 | |||
213 | #define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff) | ||
214 | #define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8) | ||
215 | |||
139 | struct kvmppc_pte { | 216 | struct kvmppc_pte { |
140 | ulong eaddr; | 217 | ulong eaddr; |
141 | u64 vpage; | 218 | u64 vpage; |
@@ -163,16 +240,18 @@ struct kvmppc_mmu { | |||
163 | bool (*is_dcbz32)(struct kvm_vcpu *vcpu); | 240 | bool (*is_dcbz32)(struct kvm_vcpu *vcpu); |
164 | }; | 241 | }; |
165 | 242 | ||
166 | struct hpte_cache { | 243 | struct kvmppc_slb { |
167 | struct hlist_node list_pte; | 244 | u64 esid; |
168 | struct hlist_node list_pte_long; | 245 | u64 vsid; |
169 | struct hlist_node list_vpte; | 246 | u64 orige; |
170 | struct hlist_node list_vpte_long; | 247 | u64 origv; |
171 | struct rcu_head rcu_head; | 248 | bool valid : 1; |
172 | u64 host_va; | 249 | bool Ks : 1; |
173 | u64 pfn; | 250 | bool Kp : 1; |
174 | ulong slot; | 251 | bool nx : 1; |
175 | struct kvmppc_pte pte; | 252 | bool large : 1; /* PTEs are 16MB */ |
253 | bool tb : 1; /* 1TB segment */ | ||
254 | bool class : 1; | ||
176 | }; | 255 | }; |
177 | 256 | ||
178 | struct kvm_vcpu_arch { | 257 | struct kvm_vcpu_arch { |
@@ -187,6 +266,9 @@ struct kvm_vcpu_arch { | |||
187 | ulong highmem_handler; | 266 | ulong highmem_handler; |
188 | ulong rmcall; | 267 | ulong rmcall; |
189 | ulong host_paca_phys; | 268 | ulong host_paca_phys; |
269 | struct kvmppc_slb slb[64]; | ||
270 | int slb_max; /* 1 + index of last valid entry in slb[] */ | ||
271 | int slb_nr; /* total number of entries in SLB */ | ||
190 | struct kvmppc_mmu mmu; | 272 | struct kvmppc_mmu mmu; |
191 | #endif | 273 | #endif |
192 | 274 | ||
@@ -195,13 +277,19 @@ struct kvm_vcpu_arch { | |||
195 | u64 fpr[32]; | 277 | u64 fpr[32]; |
196 | u64 fpscr; | 278 | u64 fpscr; |
197 | 279 | ||
280 | #ifdef CONFIG_SPE | ||
281 | ulong evr[32]; | ||
282 | ulong spefscr; | ||
283 | ulong host_spefscr; | ||
284 | u64 acc; | ||
285 | #endif | ||
198 | #ifdef CONFIG_ALTIVEC | 286 | #ifdef CONFIG_ALTIVEC |
199 | vector128 vr[32]; | 287 | vector128 vr[32]; |
200 | vector128 vscr; | 288 | vector128 vscr; |
201 | #endif | 289 | #endif |
202 | 290 | ||
203 | #ifdef CONFIG_VSX | 291 | #ifdef CONFIG_VSX |
204 | u64 vsr[32]; | 292 | u64 vsr[64]; |
205 | #endif | 293 | #endif |
206 | 294 | ||
207 | #ifdef CONFIG_PPC_BOOK3S | 295 | #ifdef CONFIG_PPC_BOOK3S |
@@ -209,22 +297,27 @@ struct kvm_vcpu_arch { | |||
209 | u32 qpr[32]; | 297 | u32 qpr[32]; |
210 | #endif | 298 | #endif |
211 | 299 | ||
212 | #ifdef CONFIG_BOOKE | ||
213 | ulong pc; | 300 | ulong pc; |
214 | ulong ctr; | 301 | ulong ctr; |
215 | ulong lr; | 302 | ulong lr; |
216 | 303 | ||
217 | ulong xer; | 304 | ulong xer; |
218 | u32 cr; | 305 | u32 cr; |
219 | #endif | ||
220 | 306 | ||
221 | #ifdef CONFIG_PPC_BOOK3S | 307 | #ifdef CONFIG_PPC_BOOK3S |
222 | ulong shadow_msr; | ||
223 | ulong hflags; | 308 | ulong hflags; |
224 | ulong guest_owned_ext; | 309 | ulong guest_owned_ext; |
310 | ulong purr; | ||
311 | ulong spurr; | ||
312 | ulong dscr; | ||
313 | ulong amr; | ||
314 | ulong uamor; | ||
315 | u32 ctrl; | ||
316 | ulong dabr; | ||
225 | #endif | 317 | #endif |
226 | u32 vrsave; /* also USPRG0 */ | 318 | u32 vrsave; /* also USPRG0 */ |
227 | u32 mmucr; | 319 | u32 mmucr; |
320 | ulong shadow_msr; | ||
228 | ulong sprg4; | 321 | ulong sprg4; |
229 | ulong sprg5; | 322 | ulong sprg5; |
230 | ulong sprg6; | 323 | ulong sprg6; |
@@ -249,6 +342,7 @@ struct kvm_vcpu_arch { | |||
249 | u32 pvr; | 342 | u32 pvr; |
250 | 343 | ||
251 | u32 shadow_pid; | 344 | u32 shadow_pid; |
345 | u32 shadow_pid1; | ||
252 | u32 pid; | 346 | u32 pid; |
253 | u32 swap_pid; | 347 | u32 swap_pid; |
254 | 348 | ||
@@ -258,6 +352,9 @@ struct kvm_vcpu_arch { | |||
258 | u32 dbcr1; | 352 | u32 dbcr1; |
259 | u32 dbsr; | 353 | u32 dbsr; |
260 | 354 | ||
355 | u64 mmcr[3]; | ||
356 | u32 pmc[8]; | ||
357 | |||
261 | #ifdef CONFIG_KVM_EXIT_TIMING | 358 | #ifdef CONFIG_KVM_EXIT_TIMING |
262 | struct mutex exit_timing_lock; | 359 | struct mutex exit_timing_lock; |
263 | struct kvmppc_exit_timing timing_exit; | 360 | struct kvmppc_exit_timing timing_exit; |
@@ -272,8 +369,12 @@ struct kvm_vcpu_arch { | |||
272 | struct dentry *debugfs_exit_timing; | 369 | struct dentry *debugfs_exit_timing; |
273 | #endif | 370 | #endif |
274 | 371 | ||
372 | #ifdef CONFIG_PPC_BOOK3S | ||
373 | ulong fault_dar; | ||
374 | u32 fault_dsisr; | ||
375 | #endif | ||
376 | |||
275 | #ifdef CONFIG_BOOKE | 377 | #ifdef CONFIG_BOOKE |
276 | u32 last_inst; | ||
277 | ulong fault_dear; | 378 | ulong fault_dear; |
278 | ulong fault_esr; | 379 | ulong fault_esr; |
279 | ulong queued_dear; | 380 | ulong queued_dear; |
@@ -288,25 +389,47 @@ struct kvm_vcpu_arch { | |||
288 | u8 dcr_is_write; | 389 | u8 dcr_is_write; |
289 | u8 osi_needed; | 390 | u8 osi_needed; |
290 | u8 osi_enabled; | 391 | u8 osi_enabled; |
392 | u8 hcall_needed; | ||
291 | 393 | ||
292 | u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ | 394 | u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ |
293 | 395 | ||
294 | struct hrtimer dec_timer; | 396 | struct hrtimer dec_timer; |
295 | struct tasklet_struct tasklet; | 397 | struct tasklet_struct tasklet; |
296 | u64 dec_jiffies; | 398 | u64 dec_jiffies; |
399 | u64 dec_expires; | ||
297 | unsigned long pending_exceptions; | 400 | unsigned long pending_exceptions; |
401 | u16 last_cpu; | ||
402 | u8 ceded; | ||
403 | u8 prodded; | ||
404 | u32 last_inst; | ||
405 | |||
406 | struct lppaca *vpa; | ||
407 | struct slb_shadow *slb_shadow; | ||
408 | struct dtl *dtl; | ||
409 | struct dtl *dtl_end; | ||
410 | |||
411 | struct kvmppc_vcore *vcore; | ||
412 | int ret; | ||
413 | int trap; | ||
414 | int state; | ||
415 | int ptid; | ||
416 | wait_queue_head_t cpu_run; | ||
417 | |||
298 | struct kvm_vcpu_arch_shared *shared; | 418 | struct kvm_vcpu_arch_shared *shared; |
299 | unsigned long magic_page_pa; /* phys addr to map the magic page to */ | 419 | unsigned long magic_page_pa; /* phys addr to map the magic page to */ |
300 | unsigned long magic_page_ea; /* effect. addr to map the magic page to */ | 420 | unsigned long magic_page_ea; /* effect. addr to map the magic page to */ |
301 | 421 | ||
302 | #ifdef CONFIG_PPC_BOOK3S | 422 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
303 | struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; | 423 | struct kvm_vcpu_arch_shared shregs; |
304 | struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; | 424 | |
305 | struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; | 425 | struct list_head run_list; |
306 | struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; | 426 | struct task_struct *run_task; |
307 | int hpte_cache_count; | 427 | struct kvm_run *kvm_run; |
308 | spinlock_t mmu_lock; | ||
309 | #endif | 428 | #endif |
310 | }; | 429 | }; |
311 | 430 | ||
431 | #define KVMPPC_VCPU_BUSY_IN_HOST 0 | ||
432 | #define KVMPPC_VCPU_BLOCKED 1 | ||
433 | #define KVMPPC_VCPU_RUNNABLE 2 | ||
434 | |||
312 | #endif /* __POWERPC_KVM_HOST_H__ */ | 435 | #endif /* __POWERPC_KVM_HOST_H__ */ |
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 9345238edecf..d121f49d62b8 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h | |||
@@ -33,6 +33,9 @@ | |||
33 | #else | 33 | #else |
34 | #include <asm/kvm_booke.h> | 34 | #include <asm/kvm_booke.h> |
35 | #endif | 35 | #endif |
36 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | ||
37 | #include <asm/paca.h> | ||
38 | #endif | ||
36 | 39 | ||
37 | enum emulation_result { | 40 | enum emulation_result { |
38 | EMULATE_DONE, /* no further processing */ | 41 | EMULATE_DONE, /* no further processing */ |
@@ -42,6 +45,7 @@ enum emulation_result { | |||
42 | EMULATE_AGAIN, /* something went wrong. go again */ | 45 | EMULATE_AGAIN, /* something went wrong. go again */ |
43 | }; | 46 | }; |
44 | 47 | ||
48 | extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | ||
45 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | 49 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); |
46 | extern char kvmppc_handlers_start[]; | 50 | extern char kvmppc_handlers_start[]; |
47 | extern unsigned long kvmppc_handler_len; | 51 | extern unsigned long kvmppc_handler_len; |
@@ -109,6 +113,27 @@ extern void kvmppc_booke_exit(void); | |||
109 | 113 | ||
110 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); | 114 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); |
111 | extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu); | 115 | extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu); |
116 | extern void kvmppc_map_magic(struct kvm_vcpu *vcpu); | ||
117 | |||
118 | extern long kvmppc_alloc_hpt(struct kvm *kvm); | ||
119 | extern void kvmppc_free_hpt(struct kvm *kvm); | ||
120 | extern long kvmppc_prepare_vrma(struct kvm *kvm, | ||
121 | struct kvm_userspace_memory_region *mem); | ||
122 | extern void kvmppc_map_vrma(struct kvm *kvm, | ||
123 | struct kvm_userspace_memory_region *mem); | ||
124 | extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); | ||
125 | extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | ||
126 | struct kvm_create_spapr_tce *args); | ||
127 | extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, | ||
128 | struct kvm_allocate_rma *rma); | ||
129 | extern struct kvmppc_rma_info *kvm_alloc_rma(void); | ||
130 | extern void kvm_release_rma(struct kvmppc_rma_info *ri); | ||
131 | extern int kvmppc_core_init_vm(struct kvm *kvm); | ||
132 | extern void kvmppc_core_destroy_vm(struct kvm *kvm); | ||
133 | extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, | ||
134 | struct kvm_userspace_memory_region *mem); | ||
135 | extern void kvmppc_core_commit_memory_region(struct kvm *kvm, | ||
136 | struct kvm_userspace_memory_region *mem); | ||
112 | 137 | ||
113 | /* | 138 | /* |
114 | * Cuts out inst bits with ordering according to spec. | 139 | * Cuts out inst bits with ordering according to spec. |
@@ -151,4 +176,20 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | |||
151 | 176 | ||
152 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); | 177 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); |
153 | 178 | ||
179 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
180 | static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) | ||
181 | { | ||
182 | paca[cpu].kvm_hstate.xics_phys = addr; | ||
183 | } | ||
184 | |||
185 | extern void kvm_rma_init(void); | ||
186 | |||
187 | #else | ||
188 | static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) | ||
189 | {} | ||
190 | |||
191 | static inline void kvm_rma_init(void) | ||
192 | {} | ||
193 | #endif | ||
194 | |||
154 | #endif /* __POWERPC_KVM_PPC_H__ */ | 195 | #endif /* __POWERPC_KVM_PPC_H__ */ |
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h index c2410af6bfd9..b8da91363864 100644 --- a/arch/powerpc/include/asm/local.h +++ b/arch/powerpc/include/asm/local.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _ARCH_POWERPC_LOCAL_H | 2 | #define _ARCH_POWERPC_LOCAL_H |
3 | 3 | ||
4 | #include <linux/percpu.h> | 4 | #include <linux/percpu.h> |
5 | #include <asm/atomic.h> | 5 | #include <linux/atomic.h> |
6 | 6 | ||
7 | typedef struct | 7 | typedef struct |
8 | { | 8 | { |
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h index 7ab82c825a03..27af7f8bbb8d 100644 --- a/arch/powerpc/include/asm/macio.h +++ b/arch/powerpc/include/asm/macio.h | |||
@@ -76,7 +76,7 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour | |||
76 | struct resource *res = &dev->resource[resource_no]; | 76 | struct resource *res = &dev->resource[resource_no]; |
77 | if (res->start == 0 || res->end == 0 || res->end < res->start) | 77 | if (res->start == 0 || res->end == 0 || res->end < res->start) |
78 | return 0; | 78 | return 0; |
79 | return res->end - res->start + 1; | 79 | return resource_size(res); |
80 | } | 80 | } |
81 | 81 | ||
82 | extern int macio_enable_devres(struct macio_dev *dev); | 82 | extern int macio_enable_devres(struct macio_dev *dev); |
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index d865bd909c7d..b445e0af4c2b 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h | |||
@@ -90,13 +90,19 @@ extern char initial_stab[]; | |||
90 | 90 | ||
91 | #define HPTE_R_PP0 ASM_CONST(0x8000000000000000) | 91 | #define HPTE_R_PP0 ASM_CONST(0x8000000000000000) |
92 | #define HPTE_R_TS ASM_CONST(0x4000000000000000) | 92 | #define HPTE_R_TS ASM_CONST(0x4000000000000000) |
93 | #define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000) | ||
93 | #define HPTE_R_RPN_SHIFT 12 | 94 | #define HPTE_R_RPN_SHIFT 12 |
94 | #define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000) | 95 | #define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000) |
95 | #define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff) | ||
96 | #define HPTE_R_PP ASM_CONST(0x0000000000000003) | 96 | #define HPTE_R_PP ASM_CONST(0x0000000000000003) |
97 | #define HPTE_R_N ASM_CONST(0x0000000000000004) | 97 | #define HPTE_R_N ASM_CONST(0x0000000000000004) |
98 | #define HPTE_R_G ASM_CONST(0x0000000000000008) | ||
99 | #define HPTE_R_M ASM_CONST(0x0000000000000010) | ||
100 | #define HPTE_R_I ASM_CONST(0x0000000000000020) | ||
101 | #define HPTE_R_W ASM_CONST(0x0000000000000040) | ||
102 | #define HPTE_R_WIMG ASM_CONST(0x0000000000000078) | ||
98 | #define HPTE_R_C ASM_CONST(0x0000000000000080) | 103 | #define HPTE_R_C ASM_CONST(0x0000000000000080) |
99 | #define HPTE_R_R ASM_CONST(0x0000000000000100) | 104 | #define HPTE_R_R ASM_CONST(0x0000000000000100) |
105 | #define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00) | ||
100 | 106 | ||
101 | #define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000) | 107 | #define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000) |
102 | #define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000) | 108 | #define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000) |
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 4138b21ae80a..698b30638681 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h | |||
@@ -115,14 +115,24 @@ | |||
115 | #ifndef __ASSEMBLY__ | 115 | #ifndef __ASSEMBLY__ |
116 | #include <asm/cputable.h> | 116 | #include <asm/cputable.h> |
117 | 117 | ||
118 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
119 | #include <asm/percpu.h> | ||
120 | DECLARE_PER_CPU(int, next_tlbcam_idx); | ||
121 | #endif | ||
122 | |||
118 | static inline int mmu_has_feature(unsigned long feature) | 123 | static inline int mmu_has_feature(unsigned long feature) |
119 | { | 124 | { |
120 | return (cur_cpu_spec->mmu_features & feature); | 125 | return (cur_cpu_spec->mmu_features & feature); |
121 | } | 126 | } |
122 | 127 | ||
128 | static inline void mmu_clear_feature(unsigned long feature) | ||
129 | { | ||
130 | cur_cpu_spec->mmu_features &= ~feature; | ||
131 | } | ||
132 | |||
123 | extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; | 133 | extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; |
124 | 134 | ||
125 | /* MMU initialization (64-bit only fo now) */ | 135 | /* MMU initialization */ |
126 | extern void early_init_mmu(void); | 136 | extern void early_init_mmu(void); |
127 | extern void early_init_mmu_secondary(void); | 137 | extern void early_init_mmu_secondary(void); |
128 | 138 | ||
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h index 89d2f99c1bf4..23cd6cc30bcf 100644 --- a/arch/powerpc/include/asm/pSeries_reconfig.h +++ b/arch/powerpc/include/asm/pSeries_reconfig.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #ifdef CONFIG_PPC_PSERIES | 17 | #ifdef CONFIG_PPC_PSERIES |
18 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); | 18 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); |
19 | extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); | 19 | extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); |
20 | extern struct blocking_notifier_head pSeries_reconfig_chain; | 20 | extern int pSeries_reconfig_notify(unsigned long action, void *p); |
21 | /* Not the best place to put this, will be fixed when we move some | 21 | /* Not the best place to put this, will be fixed when we move some |
22 | * of the rtas suspend-me stuff to pseries */ | 22 | * of the rtas suspend-me stuff to pseries */ |
23 | extern void pSeries_coalesce_init(void); | 23 | extern void pSeries_coalesce_init(void); |
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 74126765106a..516bfb3f47d9 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h | |||
@@ -103,11 +103,12 @@ struct paca_struct { | |||
103 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 103 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
104 | 104 | ||
105 | #ifdef CONFIG_PPC_BOOK3E | 105 | #ifdef CONFIG_PPC_BOOK3E |
106 | pgd_t *pgd; /* Current PGD */ | ||
107 | pgd_t *kernel_pgd; /* Kernel PGD */ | ||
108 | u64 exgen[8] __attribute__((aligned(0x80))); | 106 | u64 exgen[8] __attribute__((aligned(0x80))); |
107 | /* Keep pgd in the same cacheline as the start of extlb */ | ||
108 | pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */ | ||
109 | pgd_t *kernel_pgd; /* Kernel PGD */ | ||
109 | /* We can have up to 3 levels of reentrancy in the TLB miss handler */ | 110 | /* We can have up to 3 levels of reentrancy in the TLB miss handler */ |
110 | u64 extlb[3][EX_TLB_SIZE / sizeof(u64)] __attribute__((aligned(0x80))); | 111 | u64 extlb[3][EX_TLB_SIZE / sizeof(u64)]; |
111 | u64 exmc[8]; /* used for machine checks */ | 112 | u64 exmc[8]; /* used for machine checks */ |
112 | u64 excrit[8]; /* used for crit interrupts */ | 113 | u64 excrit[8]; /* used for crit interrupts */ |
113 | u64 exdbg[8]; /* used for debug interrupts */ | 114 | u64 exdbg[8]; /* used for debug interrupts */ |
@@ -147,9 +148,12 @@ struct paca_struct { | |||
147 | struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */ | 148 | struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */ |
148 | 149 | ||
149 | #ifdef CONFIG_KVM_BOOK3S_HANDLER | 150 | #ifdef CONFIG_KVM_BOOK3S_HANDLER |
151 | #ifdef CONFIG_KVM_BOOK3S_PR | ||
150 | /* We use this to store guest state in */ | 152 | /* We use this to store guest state in */ |
151 | struct kvmppc_book3s_shadow_vcpu shadow_vcpu; | 153 | struct kvmppc_book3s_shadow_vcpu shadow_vcpu; |
152 | #endif | 154 | #endif |
155 | struct kvmppc_host_state kvm_hstate; | ||
156 | #endif | ||
153 | }; | 157 | }; |
154 | 158 | ||
155 | extern struct paca_struct *paca; | 159 | extern struct paca_struct *paca; |
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index b90dbf8e5cd9..56b879ab3a40 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h | |||
@@ -10,58 +10,10 @@ | |||
10 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/ioport.h> | 12 | #include <linux/ioport.h> |
13 | #include <asm-generic/pci-bridge.h> | ||
13 | 14 | ||
14 | struct device_node; | 15 | struct device_node; |
15 | 16 | ||
16 | enum { | ||
17 | /* Force re-assigning all resources (ignore firmware | ||
18 | * setup completely) | ||
19 | */ | ||
20 | PPC_PCI_REASSIGN_ALL_RSRC = 0x00000001, | ||
21 | |||
22 | /* Re-assign all bus numbers */ | ||
23 | PPC_PCI_REASSIGN_ALL_BUS = 0x00000002, | ||
24 | |||
25 | /* Do not try to assign, just use existing setup */ | ||
26 | PPC_PCI_PROBE_ONLY = 0x00000004, | ||
27 | |||
28 | /* Don't bother with ISA alignment unless the bridge has | ||
29 | * ISA forwarding enabled | ||
30 | */ | ||
31 | PPC_PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, | ||
32 | |||
33 | /* Enable domain numbers in /proc */ | ||
34 | PPC_PCI_ENABLE_PROC_DOMAINS = 0x00000010, | ||
35 | /* ... except for domain 0 */ | ||
36 | PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020, | ||
37 | }; | ||
38 | #ifdef CONFIG_PCI | ||
39 | extern unsigned int ppc_pci_flags; | ||
40 | |||
41 | static inline void ppc_pci_set_flags(int flags) | ||
42 | { | ||
43 | ppc_pci_flags = flags; | ||
44 | } | ||
45 | |||
46 | static inline void ppc_pci_add_flags(int flags) | ||
47 | { | ||
48 | ppc_pci_flags |= flags; | ||
49 | } | ||
50 | |||
51 | static inline int ppc_pci_has_flag(int flag) | ||
52 | { | ||
53 | return (ppc_pci_flags & flag); | ||
54 | } | ||
55 | #else | ||
56 | static inline void ppc_pci_set_flags(int flags) { } | ||
57 | static inline void ppc_pci_add_flags(int flags) { } | ||
58 | static inline int ppc_pci_has_flag(int flag) | ||
59 | { | ||
60 | return 0; | ||
61 | } | ||
62 | #endif | ||
63 | |||
64 | |||
65 | /* | 17 | /* |
66 | * Structure of a PCI controller (host bridge) | 18 | * Structure of a PCI controller (host bridge) |
67 | */ | 19 | */ |
@@ -171,15 +123,9 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) | |||
171 | 123 | ||
172 | #ifndef CONFIG_PPC64 | 124 | #ifndef CONFIG_PPC64 |
173 | 125 | ||
174 | static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) | 126 | extern int pci_device_from_OF_node(struct device_node *node, |
175 | { | 127 | u8 *bus, u8 *devfn); |
176 | struct pci_controller *host; | 128 | extern void pci_create_OF_bus_map(void); |
177 | |||
178 | if (bus->self) | ||
179 | return pci_device_to_OF_node(bus->self); | ||
180 | host = pci_bus_to_host(bus); | ||
181 | return host ? host->dn : NULL; | ||
182 | } | ||
183 | 129 | ||
184 | static inline int isa_vaddr_is_ioport(void __iomem *address) | 130 | static inline int isa_vaddr_is_ioport(void __iomem *address) |
185 | { | 131 | { |
@@ -223,17 +169,8 @@ struct pci_dn { | |||
223 | /* Get the pointer to a device_node's pci_dn */ | 169 | /* Get the pointer to a device_node's pci_dn */ |
224 | #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) | 170 | #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) |
225 | 171 | ||
226 | extern struct device_node *fetch_dev_dn(struct pci_dev *dev); | ||
227 | extern void * update_dn_pci_info(struct device_node *dn, void *data); | 172 | extern void * update_dn_pci_info(struct device_node *dn, void *data); |
228 | 173 | ||
229 | /* Get a device_node from a pci_dev. This code must be fast except | ||
230 | * in the case where the sysdata is incorrect and needs to be fixed | ||
231 | * up (this will only happen once). */ | ||
232 | static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) | ||
233 | { | ||
234 | return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev); | ||
235 | } | ||
236 | |||
237 | static inline int pci_device_from_OF_node(struct device_node *np, | 174 | static inline int pci_device_from_OF_node(struct device_node *np, |
238 | u8 *bus, u8 *devfn) | 175 | u8 *bus, u8 *devfn) |
239 | { | 176 | { |
@@ -244,14 +181,6 @@ static inline int pci_device_from_OF_node(struct device_node *np, | |||
244 | return 0; | 181 | return 0; |
245 | } | 182 | } |
246 | 183 | ||
247 | static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) | ||
248 | { | ||
249 | if (bus->self) | ||
250 | return pci_device_to_OF_node(bus->self); | ||
251 | else | ||
252 | return bus->dev.of_node; /* Must be root bus (PHB) */ | ||
253 | } | ||
254 | |||
255 | /** Find the bus corresponding to the indicated device node */ | 184 | /** Find the bus corresponding to the indicated device node */ |
256 | extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); | 185 | extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); |
257 | 186 | ||
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 7d7790954e02..49c3de582be0 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h | |||
@@ -44,7 +44,7 @@ struct pci_dev; | |||
44 | * bus numbers (don't do that on ppc64 yet !) | 44 | * bus numbers (don't do that on ppc64 yet !) |
45 | */ | 45 | */ |
46 | #define pcibios_assign_all_busses() \ | 46 | #define pcibios_assign_all_busses() \ |
47 | (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS)) | 47 | (pci_has_flag(PCI_REASSIGN_ALL_BUS)) |
48 | 48 | ||
49 | static inline void pcibios_set_master(struct pci_dev *dev) | 49 | static inline void pcibios_set_master(struct pci_dev *dev) |
50 | { | 50 | { |
@@ -179,8 +179,7 @@ extern int remove_phb_dynamic(struct pci_controller *phb); | |||
179 | extern struct pci_dev *of_create_pci_dev(struct device_node *node, | 179 | extern struct pci_dev *of_create_pci_dev(struct device_node *node, |
180 | struct pci_bus *bus, int devfn); | 180 | struct pci_bus *bus, int devfn); |
181 | 181 | ||
182 | extern void of_scan_pci_bridge(struct device_node *node, | 182 | extern void of_scan_pci_bridge(struct pci_dev *dev); |
183 | struct pci_dev *dev); | ||
184 | 183 | ||
185 | extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); | 184 | extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); |
186 | extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); | 185 | extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); |
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 81576ee0cfb1..c4205616dfb5 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h | |||
@@ -357,7 +357,8 @@ void pgtable_cache_init(void); | |||
357 | /* | 357 | /* |
358 | * find_linux_pte returns the address of a linux pte for a given | 358 | * find_linux_pte returns the address of a linux pte for a given |
359 | * effective address and directory. If not found, it returns zero. | 359 | * effective address and directory. If not found, it returns zero. |
360 | */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) | 360 | */ |
361 | static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) | ||
361 | { | 362 | { |
362 | pgd_t *pg; | 363 | pgd_t *pg; |
363 | pud_t *pu; | 364 | pud_t *pu; |
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index e472659d906c..e980faae4225 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
@@ -71,6 +71,42 @@ | |||
71 | #define PPC_INST_ERATSX 0x7c000126 | 71 | #define PPC_INST_ERATSX 0x7c000126 |
72 | #define PPC_INST_ERATSX_DOT 0x7c000127 | 72 | #define PPC_INST_ERATSX_DOT 0x7c000127 |
73 | 73 | ||
74 | /* Misc instructions for BPF compiler */ | ||
75 | #define PPC_INST_LD 0xe8000000 | ||
76 | #define PPC_INST_LHZ 0xa0000000 | ||
77 | #define PPC_INST_LWZ 0x80000000 | ||
78 | #define PPC_INST_STD 0xf8000000 | ||
79 | #define PPC_INST_STDU 0xf8000001 | ||
80 | #define PPC_INST_MFLR 0x7c0802a6 | ||
81 | #define PPC_INST_MTLR 0x7c0803a6 | ||
82 | #define PPC_INST_CMPWI 0x2c000000 | ||
83 | #define PPC_INST_CMPDI 0x2c200000 | ||
84 | #define PPC_INST_CMPLW 0x7c000040 | ||
85 | #define PPC_INST_CMPLWI 0x28000000 | ||
86 | #define PPC_INST_ADDI 0x38000000 | ||
87 | #define PPC_INST_ADDIS 0x3c000000 | ||
88 | #define PPC_INST_ADD 0x7c000214 | ||
89 | #define PPC_INST_SUB 0x7c000050 | ||
90 | #define PPC_INST_BLR 0x4e800020 | ||
91 | #define PPC_INST_BLRL 0x4e800021 | ||
92 | #define PPC_INST_MULLW 0x7c0001d6 | ||
93 | #define PPC_INST_MULHWU 0x7c000016 | ||
94 | #define PPC_INST_MULLI 0x1c000000 | ||
95 | #define PPC_INST_DIVWU 0x7c0003d6 | ||
96 | #define PPC_INST_RLWINM 0x54000000 | ||
97 | #define PPC_INST_RLDICR 0x78000004 | ||
98 | #define PPC_INST_SLW 0x7c000030 | ||
99 | #define PPC_INST_SRW 0x7c000430 | ||
100 | #define PPC_INST_AND 0x7c000038 | ||
101 | #define PPC_INST_ANDDOT 0x7c000039 | ||
102 | #define PPC_INST_OR 0x7c000378 | ||
103 | #define PPC_INST_ANDI 0x70000000 | ||
104 | #define PPC_INST_ORI 0x60000000 | ||
105 | #define PPC_INST_ORIS 0x64000000 | ||
106 | #define PPC_INST_NEG 0x7c0000d0 | ||
107 | #define PPC_INST_BRANCH 0x48000000 | ||
108 | #define PPC_INST_BRANCH_COND 0x40800000 | ||
109 | |||
74 | /* macros to insert fields into opcodes */ | 110 | /* macros to insert fields into opcodes */ |
75 | #define __PPC_RA(a) (((a) & 0x1f) << 16) | 111 | #define __PPC_RA(a) (((a) & 0x1f) << 16) |
76 | #define __PPC_RB(b) (((b) & 0x1f) << 11) | 112 | #define __PPC_RB(b) (((b) & 0x1f) << 11) |
@@ -83,6 +119,10 @@ | |||
83 | #define __PPC_T_TLB(t) (((t) & 0x3) << 21) | 119 | #define __PPC_T_TLB(t) (((t) & 0x3) << 21) |
84 | #define __PPC_WC(w) (((w) & 0x3) << 21) | 120 | #define __PPC_WC(w) (((w) & 0x3) << 21) |
85 | #define __PPC_WS(w) (((w) & 0x1f) << 11) | 121 | #define __PPC_WS(w) (((w) & 0x1f) << 11) |
122 | #define __PPC_SH(s) __PPC_WS(s) | ||
123 | #define __PPC_MB(s) (((s) & 0x1f) << 6) | ||
124 | #define __PPC_ME(s) (((s) & 0x1f) << 1) | ||
125 | #define __PPC_BI(s) (((s) & 0x1f) << 16) | ||
86 | 126 | ||
87 | /* | 127 | /* |
88 | * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a | 128 | * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a |
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 1b422381fc16..368f72f79808 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -150,18 +150,22 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | |||
150 | #define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base) | 150 | #define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base) |
151 | #define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base) | 151 | #define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base) |
152 | 152 | ||
153 | #define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base) | 153 | /* |
154 | #define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base) | 154 | * b = base register for addressing, o = base offset from register of 1st EVR |
155 | #define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base) | 155 | * n = first EVR, s = scratch |
156 | #define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base) | 156 | */ |
157 | #define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base) | 157 | #define SAVE_EVR(n,s,b,o) evmergehi s,s,n; stw s,o+4*(n)(b) |
158 | #define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base) | 158 | #define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o) |
159 | #define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n | 159 | #define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o) |
160 | #define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base) | 160 | #define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o) |
161 | #define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base) | 161 | #define SAVE_16EVRS(n,s,b,o) SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o) |
162 | #define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base) | 162 | #define SAVE_32EVRS(n,s,b,o) SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o) |
163 | #define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base) | 163 | #define REST_EVR(n,s,b,o) lwz s,o+4*(n)(b); evmergelo n,s,n |
164 | #define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base) | 164 | #define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o) |
165 | #define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o) | ||
166 | #define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o) | ||
167 | #define REST_16EVRS(n,s,b,o) REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o) | ||
168 | #define REST_32EVRS(n,s,b,o) REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o) | ||
165 | 169 | ||
166 | /* Macros to adjust thread priority for hardware multithreading */ | 170 | /* Macros to adjust thread priority for hardware multithreading */ |
167 | #define HMT_VERY_LOW or 31,31,31 # very low priority | 171 | #define HMT_VERY_LOW or 31,31,31 # very low priority |
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index d50c2b6d9bc3..eb11a446720e 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -20,6 +20,7 @@ | |||
20 | 20 | ||
21 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
22 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
23 | #include <linux/cache.h> | ||
23 | #include <asm/ptrace.h> | 24 | #include <asm/ptrace.h> |
24 | #include <asm/types.h> | 25 | #include <asm/types.h> |
25 | 26 | ||
@@ -156,6 +157,10 @@ struct thread_struct { | |||
156 | #endif | 157 | #endif |
157 | struct pt_regs *regs; /* Pointer to saved register state */ | 158 | struct pt_regs *regs; /* Pointer to saved register state */ |
158 | mm_segment_t fs; /* for get_fs() validation */ | 159 | mm_segment_t fs; /* for get_fs() validation */ |
160 | #ifdef CONFIG_BOOKE | ||
161 | /* BookE base exception scratch space; align on cacheline */ | ||
162 | unsigned long normsave[8] ____cacheline_aligned; | ||
163 | #endif | ||
159 | #ifdef CONFIG_PPC32 | 164 | #ifdef CONFIG_PPC32 |
160 | void *pgdir; /* root of page-table tree */ | 165 | void *pgdir; /* root of page-table tree */ |
161 | #endif | 166 | #endif |
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index c189aa5fe1f4..b5c91901e384 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h | |||
@@ -18,24 +18,10 @@ | |||
18 | */ | 18 | */ |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <asm/irq.h> | 20 | #include <asm/irq.h> |
21 | #include <asm/atomic.h> | 21 | #include <linux/atomic.h> |
22 | 22 | ||
23 | #define HAVE_ARCH_DEVTREE_FIXUPS | 23 | #define HAVE_ARCH_DEVTREE_FIXUPS |
24 | 24 | ||
25 | #ifdef CONFIG_PPC32 | ||
26 | /* | ||
27 | * PCI <-> OF matching functions | ||
28 | * (XXX should these be here?) | ||
29 | */ | ||
30 | struct pci_bus; | ||
31 | struct pci_dev; | ||
32 | extern int pci_device_from_OF_node(struct device_node *node, | ||
33 | u8* bus, u8* devfn); | ||
34 | extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int); | ||
35 | extern struct device_node* pci_device_to_OF_node(struct pci_dev *); | ||
36 | extern void pci_create_OF_bus_map(void); | ||
37 | #endif | ||
38 | |||
39 | /* | 25 | /* |
40 | * OF address retreival & translation | 26 | * OF address retreival & translation |
41 | */ | 27 | */ |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c5cae0dd176c..e8aaf6fce38b 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -189,6 +189,9 @@ | |||
189 | #define SPRN_CTR 0x009 /* Count Register */ | 189 | #define SPRN_CTR 0x009 /* Count Register */ |
190 | #define SPRN_DSCR 0x11 | 190 | #define SPRN_DSCR 0x11 |
191 | #define SPRN_CFAR 0x1c /* Come From Address Register */ | 191 | #define SPRN_CFAR 0x1c /* Come From Address Register */ |
192 | #define SPRN_AMR 0x1d /* Authority Mask Register */ | ||
193 | #define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */ | ||
194 | #define SPRN_AMOR 0x15d /* Authority Mask Override Register */ | ||
192 | #define SPRN_ACOP 0x1F /* Available Coprocessor Register */ | 195 | #define SPRN_ACOP 0x1F /* Available Coprocessor Register */ |
193 | #define SPRN_CTRLF 0x088 | 196 | #define SPRN_CTRLF 0x088 |
194 | #define SPRN_CTRLT 0x098 | 197 | #define SPRN_CTRLT 0x098 |
@@ -232,22 +235,28 @@ | |||
232 | #define LPCR_VPM0 (1ul << (63-0)) | 235 | #define LPCR_VPM0 (1ul << (63-0)) |
233 | #define LPCR_VPM1 (1ul << (63-1)) | 236 | #define LPCR_VPM1 (1ul << (63-1)) |
234 | #define LPCR_ISL (1ul << (63-2)) | 237 | #define LPCR_ISL (1ul << (63-2)) |
238 | #define LPCR_VC_SH (63-2) | ||
235 | #define LPCR_DPFD_SH (63-11) | 239 | #define LPCR_DPFD_SH (63-11) |
236 | #define LPCR_VRMA_L (1ul << (63-12)) | 240 | #define LPCR_VRMA_L (1ul << (63-12)) |
237 | #define LPCR_VRMA_LP0 (1ul << (63-15)) | 241 | #define LPCR_VRMA_LP0 (1ul << (63-15)) |
238 | #define LPCR_VRMA_LP1 (1ul << (63-16)) | 242 | #define LPCR_VRMA_LP1 (1ul << (63-16)) |
243 | #define LPCR_VRMASD_SH (63-16) | ||
239 | #define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */ | 244 | #define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */ |
245 | #define LPCR_RMLS_SH (63-37) | ||
240 | #define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */ | 246 | #define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */ |
241 | #define LPCR_PECE 0x00007000 /* powersave exit cause enable */ | 247 | #define LPCR_PECE 0x00007000 /* powersave exit cause enable */ |
242 | #define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */ | 248 | #define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */ |
243 | #define LPCR_PECE1 0x00002000 /* decrementer can cause exit */ | 249 | #define LPCR_PECE1 0x00002000 /* decrementer can cause exit */ |
244 | #define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ | 250 | #define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ |
245 | #define LPCR_MER 0x00000800 /* Mediated External Exception */ | 251 | #define LPCR_MER 0x00000800 /* Mediated External Exception */ |
252 | #define LPCR_LPES 0x0000000c | ||
246 | #define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */ | 253 | #define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */ |
247 | #define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */ | 254 | #define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */ |
255 | #define LPCR_LPES_SH 2 | ||
248 | #define LPCR_RMI 0x00000002 /* real mode is cache inhibit */ | 256 | #define LPCR_RMI 0x00000002 /* real mode is cache inhibit */ |
249 | #define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */ | 257 | #define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */ |
250 | #define SPRN_LPID 0x13F /* Logical Partition Identifier */ | 258 | #define SPRN_LPID 0x13F /* Logical Partition Identifier */ |
259 | #define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */ | ||
251 | #define SPRN_HMER 0x150 /* Hardware m? error recovery */ | 260 | #define SPRN_HMER 0x150 /* Hardware m? error recovery */ |
252 | #define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */ | 261 | #define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */ |
253 | #define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ | 262 | #define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ |
@@ -298,6 +307,7 @@ | |||
298 | #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ | 307 | #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ |
299 | #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */ | 308 | #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */ |
300 | #define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */ | 309 | #define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */ |
310 | #define HID0_HDICE_SH (63 - 23) /* 970 HDEC interrupt enable */ | ||
301 | #define HID0_EMCP (1<<31) /* Enable Machine Check pin */ | 311 | #define HID0_EMCP (1<<31) /* Enable Machine Check pin */ |
302 | #define HID0_EBA (1<<29) /* Enable Bus Address Parity */ | 312 | #define HID0_EBA (1<<29) /* Enable Bus Address Parity */ |
303 | #define HID0_EBD (1<<28) /* Enable Bus Data Parity */ | 313 | #define HID0_EBD (1<<28) /* Enable Bus Data Parity */ |
@@ -353,6 +363,13 @@ | |||
353 | #define SPRN_IABR2 0x3FA /* 83xx */ | 363 | #define SPRN_IABR2 0x3FA /* 83xx */ |
354 | #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ | 364 | #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ |
355 | #define SPRN_HID4 0x3F4 /* 970 HID4 */ | 365 | #define SPRN_HID4 0x3F4 /* 970 HID4 */ |
366 | #define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */ | ||
367 | #define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */ | ||
368 | #define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */ | ||
369 | #define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */ | ||
370 | #define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */ | ||
371 | #define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */ | ||
372 | #define HID4_LPID1_SH 0 /* partition ID top 2 bits */ | ||
356 | #define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */ | 373 | #define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */ |
357 | #define SPRN_HID5 0x3F6 /* 970 HID5 */ | 374 | #define SPRN_HID5 0x3F6 /* 970 HID5 */ |
358 | #define SPRN_HID6 0x3F9 /* BE HID 6 */ | 375 | #define SPRN_HID6 0x3F9 /* BE HID 6 */ |
@@ -802,28 +819,28 @@ | |||
802 | mfspr rX,SPRN_SPRG_PACA; \ | 819 | mfspr rX,SPRN_SPRG_PACA; \ |
803 | FTR_SECTION_ELSE_NESTED(66); \ | 820 | FTR_SECTION_ELSE_NESTED(66); \ |
804 | mfspr rX,SPRN_SPRG_HPACA; \ | 821 | mfspr rX,SPRN_SPRG_HPACA; \ |
805 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) | 822 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66) |
806 | 823 | ||
807 | #define SET_PACA(rX) \ | 824 | #define SET_PACA(rX) \ |
808 | BEGIN_FTR_SECTION_NESTED(66); \ | 825 | BEGIN_FTR_SECTION_NESTED(66); \ |
809 | mtspr SPRN_SPRG_PACA,rX; \ | 826 | mtspr SPRN_SPRG_PACA,rX; \ |
810 | FTR_SECTION_ELSE_NESTED(66); \ | 827 | FTR_SECTION_ELSE_NESTED(66); \ |
811 | mtspr SPRN_SPRG_HPACA,rX; \ | 828 | mtspr SPRN_SPRG_HPACA,rX; \ |
812 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) | 829 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66) |
813 | 830 | ||
814 | #define GET_SCRATCH0(rX) \ | 831 | #define GET_SCRATCH0(rX) \ |
815 | BEGIN_FTR_SECTION_NESTED(66); \ | 832 | BEGIN_FTR_SECTION_NESTED(66); \ |
816 | mfspr rX,SPRN_SPRG_SCRATCH0; \ | 833 | mfspr rX,SPRN_SPRG_SCRATCH0; \ |
817 | FTR_SECTION_ELSE_NESTED(66); \ | 834 | FTR_SECTION_ELSE_NESTED(66); \ |
818 | mfspr rX,SPRN_SPRG_HSCRATCH0; \ | 835 | mfspr rX,SPRN_SPRG_HSCRATCH0; \ |
819 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) | 836 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66) |
820 | 837 | ||
821 | #define SET_SCRATCH0(rX) \ | 838 | #define SET_SCRATCH0(rX) \ |
822 | BEGIN_FTR_SECTION_NESTED(66); \ | 839 | BEGIN_FTR_SECTION_NESTED(66); \ |
823 | mtspr SPRN_SPRG_SCRATCH0,rX; \ | 840 | mtspr SPRN_SPRG_SCRATCH0,rX; \ |
824 | FTR_SECTION_ELSE_NESTED(66); \ | 841 | FTR_SECTION_ELSE_NESTED(66); \ |
825 | mtspr SPRN_SPRG_HSCRATCH0,rX; \ | 842 | mtspr SPRN_SPRG_HSCRATCH0,rX; \ |
826 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) | 843 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66) |
827 | 844 | ||
828 | #else /* CONFIG_PPC_BOOK3S_64 */ | 845 | #else /* CONFIG_PPC_BOOK3S_64 */ |
829 | #define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0 | 846 | #define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0 |
@@ -872,8 +889,8 @@ | |||
872 | #define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W | 889 | #define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W |
873 | #define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R | 890 | #define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R |
874 | #define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W | 891 | #define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W |
875 | #define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG6R | 892 | #define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG1 |
876 | #define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG6W | 893 | #define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG1 |
877 | #define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R | 894 | #define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R |
878 | #define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W | 895 | #define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W |
879 | #ifdef CONFIG_E200 | 896 | #ifdef CONFIG_E200 |
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 0f0ad9fa01c1..9ec0b39f9ddc 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h | |||
@@ -318,6 +318,7 @@ | |||
318 | #define ESR_ILK 0x00100000 /* Instr. Cache Locking */ | 318 | #define ESR_ILK 0x00100000 /* Instr. Cache Locking */ |
319 | #define ESR_PUO 0x00040000 /* Unimplemented Operation exception */ | 319 | #define ESR_PUO 0x00040000 /* Unimplemented Operation exception */ |
320 | #define ESR_BO 0x00020000 /* Byte Ordering */ | 320 | #define ESR_BO 0x00020000 /* Byte Ordering */ |
321 | #define ESR_SPV 0x00000080 /* Signal Processing operation */ | ||
321 | 322 | ||
322 | /* Bit definitions related to the DBCR0. */ | 323 | /* Bit definitions related to the DBCR0. */ |
323 | #if defined(CONFIG_40x) | 324 | #if defined(CONFIG_40x) |
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index dae19342f0b9..186e0fb835bd 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h | |||
@@ -3,4 +3,8 @@ | |||
3 | 3 | ||
4 | #include <asm-generic/setup.h> | 4 | #include <asm-generic/setup.h> |
5 | 5 | ||
6 | #ifndef __ASSEMBLY__ | ||
7 | extern void ppc_printk_progress(char *s, unsigned short hex); | ||
8 | #endif | ||
9 | |||
6 | #endif /* _ASM_POWERPC_SETUP_H */ | 10 | #endif /* _ASM_POWERPC_SETUP_H */ |
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 11eb404b5606..15a70b7f638b 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <asm/percpu.h> | 30 | #include <asm/percpu.h> |
31 | 31 | ||
32 | extern int boot_cpuid; | 32 | extern int boot_cpuid; |
33 | extern int boot_cpu_count; | 33 | extern int spinning_secondaries; |
34 | 34 | ||
35 | extern void cpu_die(void); | 35 | extern void cpu_die(void); |
36 | 36 | ||
@@ -119,7 +119,6 @@ extern const char *smp_ipi_name[]; | |||
119 | /* for irq controllers with only a single ipi */ | 119 | /* for irq controllers with only a single ipi */ |
120 | extern void smp_muxed_ipi_set_data(int cpu, unsigned long data); | 120 | extern void smp_muxed_ipi_set_data(int cpu, unsigned long data); |
121 | extern void smp_muxed_ipi_message_pass(int cpu, int msg); | 121 | extern void smp_muxed_ipi_message_pass(int cpu, int msg); |
122 | extern void smp_muxed_ipi_resend(void); | ||
123 | extern irqreturn_t smp_ipi_demux(void); | 122 | extern irqreturn_t smp_ipi_demux(void); |
124 | 123 | ||
125 | void smp_init_iSeries(void); | 124 | void smp_init_iSeries(void); |
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h index e3bdada8c542..ae20ce1af4c7 100644 --- a/arch/powerpc/include/asm/smu.h +++ b/arch/powerpc/include/asm/smu.h | |||
@@ -547,7 +547,7 @@ struct smu_sdbp_header { | |||
547 | * (currently, afaik, this concerns only the FVT partition | 547 | * (currently, afaik, this concerns only the FVT partition |
548 | * (0x12) | 548 | * (0x12) |
549 | */ | 549 | */ |
550 | #define SMU_U16_MIX(x) le16_to_cpu(x); | 550 | #define SMU_U16_MIX(x) le16_to_cpu(x) |
551 | #define SMU_U32_MIX(x) ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8)) | 551 | #define SMU_U32_MIX(x) ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8)) |
552 | 552 | ||
553 | 553 | ||
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index 2dc595dda03b..e30a13d1ee76 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h | |||
@@ -120,7 +120,6 @@ extern void do_dabr(struct pt_regs *regs, unsigned long address, | |||
120 | unsigned long error_code); | 120 | unsigned long error_code); |
121 | #endif | 121 | #endif |
122 | extern void print_backtrace(unsigned long *); | 122 | extern void print_backtrace(unsigned long *); |
123 | extern void show_regs(struct pt_regs * regs); | ||
124 | extern void flush_instruction_cache(void); | 123 | extern void flush_instruction_cache(void); |
125 | extern void hard_reset_now(void); | 124 | extern void hard_reset_now(void); |
126 | extern void poweroff_now(void); | 125 | extern void poweroff_now(void); |
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 58580e94a2bb..93e05d1b34b2 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h | |||
@@ -40,6 +40,7 @@ extern void udbg_adb_init_early(void); | |||
40 | 40 | ||
41 | extern void __init udbg_early_init(void); | 41 | extern void __init udbg_early_init(void); |
42 | extern void __init udbg_init_debug_lpar(void); | 42 | extern void __init udbg_init_debug_lpar(void); |
43 | extern void __init udbg_init_debug_lpar_hvsi(void); | ||
43 | extern void __init udbg_init_pmac_realmode(void); | 44 | extern void __init udbg_init_pmac_realmode(void); |
44 | extern void __init udbg_init_maple_realmode(void); | 45 | extern void __init udbg_init_maple_realmode(void); |
45 | extern void __init udbg_init_pas_realmode(void); | 46 | extern void __init udbg_init_pas_realmode(void); |