| | | |
|---|---|---|
| author | Matias Zabaljauregui <zabaljauregui@gmail.com> | 2009-06-13 00:27:07 -0400 |
| committer | Rusty Russell <rusty@rustcorp.com.au> | 2009-06-12 08:57:07 -0400 |
| commit | cefcad1773197523e11e18b669f245e6a8d32058 (patch) | |
| tree | 2d94e3df551b3ee58c1ac89ed4872e2153496c05 /arch/x86/lguest/boot.c | |
| parent | ebe0ba84f55950a89cb7af94c7ffc35ee3992f9e (diff) | |
lguest: Add support for kvm_hypercall4()
Add support for kvm_hypercall4(); PAE wants it.
Signed-off-by: Matias Zabaljauregui <zabaljauregui at gmail.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'arch/x86/lguest/boot.c')
-rw-r--r-- | arch/x86/lguest/boot.c | 26
1 file changed, 20 insertions, 6 deletions
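For reference, the Guest-side kvm_hypercall4() wrapper this patch starts calling is the standard x86 helper from arch/x86/include/asm/kvm_para.h; it is not part of this diff. A rough sketch of its usual shape (the exact macro and register constraints live in that header, so treat this as an illustration rather than the authoritative definition):

```c
/* Sketch of the four-argument guest hypercall wrapper, kvm_para.h style.
 * KVM_HYPERCALL expands to the hypercall instruction; under lguest the
 * Host traps it and dispatches the call.  Illustrative only. */
static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;
	/* Call number in eax, arguments in ebx, ecx, edx, esi. */
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}
```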
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 943a75ef70b9..d12f554e5f6a 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -87,7 +87,7 @@ struct lguest_data lguest_data = {
 
 /*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a
  * ring buffer of stored hypercalls which the Host will run though next time we
- * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
+ * do a normal hypercall. Each entry in the ring has 5 slots for the hypercall
  * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
  * and 255 once the Host has finished with it.
  *
@@ -96,7 +96,8 @@ struct lguest_data lguest_data = {
  * effect of causing the Host to run all the stored calls in the ring buffer
  * which empties it for next time! */
 static void async_hcall(unsigned long call, unsigned long arg1,
-                        unsigned long arg2, unsigned long arg3)
+                        unsigned long arg2, unsigned long arg3,
+                        unsigned long arg4)
 {
         /* Note: This code assumes we're uniprocessor. */
         static unsigned int next_call;
@@ -108,12 +109,13 @@ static void async_hcall(unsigned long call, unsigned long arg1,
         local_irq_save(flags);
         if (lguest_data.hcall_status[next_call] != 0xFF) {
                 /* Table full, so do normal hcall which will flush table. */
-                kvm_hypercall3(call, arg1, arg2, arg3);
+                kvm_hypercall4(call, arg1, arg2, arg3, arg4);
         } else {
                 lguest_data.hcalls[next_call].arg0 = call;
                 lguest_data.hcalls[next_call].arg1 = arg1;
                 lguest_data.hcalls[next_call].arg2 = arg2;
                 lguest_data.hcalls[next_call].arg3 = arg3;
+                lguest_data.hcalls[next_call].arg4 = arg4;
                 /* Arguments must all be written before we mark it to go */
                 wmb();
                 lguest_data.hcall_status[next_call] = 0;
@@ -141,7 +143,7 @@ static void lazy_hcall1(unsigned long call,
         if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
                 kvm_hypercall1(call, arg1);
         else
-                async_hcall(call, arg1, 0, 0);
+                async_hcall(call, arg1, 0, 0, 0);
 }
 
 static void lazy_hcall2(unsigned long call,
@@ -151,7 +153,7 @@ static void lazy_hcall2(unsigned long call,
         if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
                 kvm_hypercall2(call, arg1, arg2);
         else
-                async_hcall(call, arg1, arg2, 0);
+                async_hcall(call, arg1, arg2, 0, 0);
 }
 
 static void lazy_hcall3(unsigned long call,
@@ -162,7 +164,19 @@ static void lazy_hcall3(unsigned long call,
         if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
                 kvm_hypercall3(call, arg1, arg2, arg3);
         else
-                async_hcall(call, arg1, arg2, arg3);
+                async_hcall(call, arg1, arg2, arg3, 0);
+}
+
+static void lazy_hcall4(unsigned long call,
+                        unsigned long arg1,
+                        unsigned long arg2,
+                        unsigned long arg3,
+                        unsigned long arg4)
+{
+        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
+                kvm_hypercall4(call, arg1, arg2, arg3, arg4);
+        else
+                async_hcall(call, arg1, arg2, arg3, arg4);
 }
 
 /* When lazy mode is turned off reset the per-cpu lazy mode variable and then
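The "PAE wants it" in the commit message points at the follow-on PAE series: with 64-bit page table entries on a 32-bit Guest, a pte no longer fits in a single argument, so a set-PTE hypercall needs the page directory, the address, and both halves of the pte. A hypothetical caller, sketched only to show why a fourth slot is needed; LHCALL_SET_PTE and the pte_low/pte_high split follow the later lguest PAE patch, not this commit:

```c
/* Illustrative sketch: how a PAE Guest might push a 64-bit pte to the Host
 * via the new four-argument lazy hypercall.  The function name is made up;
 * treat the details as an example rather than this commit's code. */
static void example_set_pte_at(struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep, pte_t pteval)
{
        native_set_pte(ptep, pteval);           /* update the Guest's own page table */
        lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
                    pteval.pte_low, pteval.pte_high);   /* 64-bit pte as two words */
}
```

Storing arg4 in the ring also presumes the matching header change that grows struct hcall_args in asm/lguest_hcall.h from arg0–arg3 to include an arg4 field, which is what the "5 slots" comment update above refers to.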