path: root/arch/powerpc/kernel
author    Liu Yu-B13201 <Yu.Liu@freescale.com>    2012-07-03 01:48:53 -0400
committer Alexander Graf <agraf@suse.de>          2012-10-05 17:38:37 -0400
commit    2f979de8a716bdbdc9f4db532652fbca08ed710c (patch)
tree      14f51a0b1c1b71f6981d4723a165fe284c70f38d /arch/powerpc/kernel
parent    9202e07636f0c4858ba6c30773a3f160b2b5659a (diff)
KVM: PPC: ev_idle hcall support for e500 guests
Signed-off-by: Liu Yu <yu.liu@freescale.com>
[varun: 64-bit changes]
Signed-off-by: Varun Sethi <Varun.Sethi@freescale.com>
Signed-off-by: Stuart Yoder <stuart.yoder@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
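The hunks below do two things: epapr_hcalls.S gains an epapr_ev_idle entry point whose hcall site is a li r3,-1 followed by three nops, and epapr_paravirt_init() now patches those slots with the same hypervisor-provided instruction words it already writes into epapr_hypercall_start, then installs the new routine as the power-save hook when the hypervisor advertises idle support. A minimal stand-alone C sketch of the patching scheme follows; it is not kernel code, all names in it are invented stand-ins for the kernel symbols, and the instruction words are placeholders.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define STUB_WORDS 4

static uint32_t hypercall_stub[STUB_WORDS]; /* stand-in for epapr_hypercall_start[] */
static uint32_t ev_idle_stub[STUB_WORDS];   /* stand-in for epapr_ev_idle_start[] */

static int epapr_patch_stubs(const uint32_t *insts, size_t len)
{
	size_t i;

	/* Same bounds check as epapr_paravirt_init(): whole 32-bit words,
	 * at most four of them. */
	if (len % 4 || len > (4 * 4))
		return -1;

	for (i = 0; i < (len / 4); i++) {
		hypercall_stub[i] = insts[i];  /* patch_instruction() in the kernel */
		ev_idle_stub[i] = insts[i];
	}
	return 0;
}

int main(void)
{
	/* Placeholder values, not real PowerPC opcodes; the kernel reads the
	 * actual words from the hypervisor's device tree node. */
	const uint32_t insts[] = { 0x11111111, 0x22222222, 0x33333333 };

	if (epapr_patch_stubs(insts, sizeof(insts)) == 0)
		printf("patched %zu words into both stubs\n", sizeof(insts) / 4);
	return 0;
}

Writing identical words into both stubs means the idle path enters the hypervisor through whatever hypercall sequence the device tree describes, rather than hard-coding one in the kernel.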
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/epapr_hcalls.S   | 28
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c | 11
2 files changed, 38 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 697b390ebfd8..62c0dc237826 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -8,13 +8,41 @@
  */
 
 #include <linux/threads.h>
+#include <asm/epapr_hcalls.h>
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
 #include <asm/asm-offsets.h>
 
+/* epapr_ev_idle() was derived from e500_idle() */
+_GLOBAL(epapr_ev_idle)
+	CURRENT_THREAD_INFO(r3, r1)
+	PPC_LL	r4, TI_LOCAL_FLAGS(r3)	/* set napping bit */
+	ori	r4, r4,_TLF_NAPPING	/* so when we take an exception */
+	PPC_STL	r4, TI_LOCAL_FLAGS(r3)	/* it will return to our caller */
+
+	wrteei	1
+
+idle_loop:
+	LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE))
+
+.global epapr_ev_idle_start
+epapr_ev_idle_start:
+	li	r3, -1
+	nop
+	nop
+	nop
+
+	/*
+	 * Guard against spurious wakeups from a hypervisor --
+	 * only interrupt will cause us to return to LR due to
+	 * _TLF_NAPPING.
+	 */
+	b	idle_loop
+
 /* Hypercall entry point. Will be patched with device tree instructions. */
 .global epapr_hypercall_start
 epapr_hypercall_start:
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 028aeae370b6..f3eab8594d9f 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -21,6 +21,10 @@
 #include <asm/epapr_hcalls.h>
 #include <asm/cacheflush.h>
 #include <asm/code-patching.h>
+#include <asm/machdep.h>
+
+extern void epapr_ev_idle(void);
+extern u32 epapr_ev_idle_start[];
 
 bool epapr_paravirt_enabled;
 
@@ -41,8 +45,13 @@ static int __init epapr_paravirt_init(void)
 	if (len % 4 || len > (4 * 4))
 		return -ENODEV;
 
-	for (i = 0; i < (len / 4); i++)
+	for (i = 0; i < (len / 4); i++) {
 		patch_instruction(epapr_hypercall_start + i, insts[i]);
+		patch_instruction(epapr_ev_idle_start + i, insts[i]);
+	}
+
+	if (of_get_property(hyper_node, "has-idle", NULL))
+		ppc_md.power_save = epapr_ev_idle;
 
 	epapr_paravirt_enabled = true;
 
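The second epapr_paravirt.c hunk installs epapr_ev_idle as the platform power-save hook only when the hypervisor node carries a "has-idle" property; otherwise the guest keeps its default idle behaviour. A tiny stand-alone sketch of that selection logic, with stub routines in place of the kernel's (the flag and both idle functions here are invented for the example):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins for the guest's two idle choices. */
static void default_idle(void)       { puts("no power_save hook: default idle"); }
static void epapr_ev_idle_stub(void) { puts("issue the EV_IDLE hcall to the hypervisor"); }

/* Plays the role of ppc_md.power_save, which the patch only fills in
 * when the hypervisor advertises idle support. */
static void (*power_save)(void) = NULL;

static void epapr_register_idle(bool hyper_node_has_idle)
{
	/* Mirrors: if (of_get_property(hyper_node, "has-idle", NULL))
	 *              ppc_md.power_save = epapr_ev_idle;            */
	if (hyper_node_has_idle)
		power_save = epapr_ev_idle_stub;
}

int main(void)
{
	epapr_register_idle(true);
	(power_save ? power_save : default_idle)();
	return 0;
}

Gating the hook on "has-idle" keeps the guest from issuing EV_IDLE to a hypervisor that does not implement it.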