author	Paul Mackerras <paulus@samba.org>	2011-07-23 03:42:46 -0400
committer	Avi Kivity <avi@redhat.com>	2011-09-25 12:52:30 -0400
commit	19ccb76a1938ab364a412253daec64613acbf3df (patch)
tree	42a3e3307355202fe0db48e2530bb42e43d9a035 /arch/powerpc/include/asm/kvm_host.h
parent	02143947603fe90237a0423d34dd8943de229f78 (diff)
KVM: PPC: Implement H_CEDE hcall for book3s_hv in real-mode code
With a KVM guest operating in SMT4 mode (i.e. 4 hardware threads per core), whenever a CPU goes idle, we have to pull all the other hardware threads in the core out of the guest, because the H_CEDE hcall is handled in the kernel. This is inefficient.

This adds code to book3s_hv_rmhandlers.S to handle the H_CEDE hcall in real mode. When a guest vcpu does an H_CEDE hcall, we now only exit to the kernel if all the other vcpus in the same core are also idle. Otherwise we mark this vcpu as napping, save state that could be lost in nap mode (mainly GPRs and FPRs), and execute the nap instruction. When the thread wakes up, because of a decrementer or external interrupt, we come back in at kvm_start_guest (from the system reset interrupt vector), find the `napping' flag set in the paca, and go to the resume path.

This has some other ramifications. First, when starting a core, we now start all the threads, both those that are immediately runnable and those that are idle. This is so that we don't have to pull all the threads out of the guest when an idle thread gets a decrementer interrupt and wants to start running. In fact the idle threads will all start with the H_CEDE hcall returning; being idle they will just do another H_CEDE immediately and go to nap mode.

This required some changes to kvmppc_run_core() and kvmppc_run_vcpu(). These functions have been restructured to make them simpler and clearer. We introduce a level of indirection in the wait queue that gets woken when external and decrementer interrupts get generated for a vcpu, so that we can have the 4 vcpus in a vcore using the same wait queue. We need this because the 4 vcpus are being handled by one thread.

Secondly, when we need to exit from the guest to the kernel, we now have to generate an IPI for any napping threads, because an HDEC interrupt doesn't wake up a napping thread.

Thirdly, we now need to be able to handle virtual external interrupts and decrementer interrupts becoming pending while a thread is napping, and deliver those interrupts to the guest when the thread wakes. This is done in kvmppc_cede_reentry, just before fast_guest_return.

Finally, since we are not using the generic kvm_vcpu_block for book3s_hv, and hence not calling kvm_arch_vcpu_runnable, we can remove the #ifdef from kvm_arch_vcpu_runnable.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
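[Editor's note] The wait-queue indirection described in the message shows up in this header as a new `wqp` pointer per vcpu and a shared `wq` per vcore. As a rough illustration only (this is not code from the patch, and both helper names are hypothetical), the indirection could be wired up and used along these lines:

	#include <linux/kvm_host.h>
	#include <linux/wait.h>

	/*
	 * Sketch: each vcpu records which wait queue it actually sleeps on.
	 * For book3s_hv, every vcpu in a vcore points wqp at the vcore's
	 * shared wq, so one wake-up reaches whichever of the four threads is
	 * waiting; other flavours can leave wqp pointing at the vcpu's own
	 * cpu_run queue.
	 */
	static void kvmppc_vcpu_attach_vcore_sketch(struct kvm_vcpu *vcpu,
						    struct kvmppc_vcore *vc)
	{
		vcpu->arch.vcore = vc;
		vcpu->arch.wqp = &vc->wq;	/* sleep on the shared per-core queue */
	}

	static void kvmppc_vcpu_wake_sketch(struct kvm_vcpu *vcpu)
	{
		/* wake through the indirection, wherever the vcpu is sleeping */
		if (waitqueue_active(vcpu->arch.wqp))
			wake_up_interruptible(vcpu->arch.wqp);
	}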
Diffstat (limited to 'arch/powerpc/include/asm/kvm_host.h')
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index dec3054f6ad4..bf8af5d5d5dc 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -198,21 +198,29 @@ struct kvm_arch {
  */
 struct kvmppc_vcore {
 	int n_runnable;
-	int n_blocked;
+	int n_busy;
 	int num_threads;
 	int entry_exit_count;
 	int n_woken;
 	int nap_count;
+	int napping_threads;
 	u16 pcpu;
-	u8 vcore_running;
+	u8 vcore_state;
 	u8 in_guest;
 	struct list_head runnable_threads;
 	spinlock_t lock;
+	wait_queue_head_t wq;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
 #define VCORE_EXIT_COUNT(vc)	((vc)->entry_exit_count >> 8)
 
+/* Values for vcore_state */
+#define VCORE_INACTIVE	0
+#define VCORE_RUNNING	1
+#define VCORE_EXITING	2
+#define VCORE_SLEEPING	3
+
 struct kvmppc_pte {
 	ulong eaddr;
 	u64 vpage;
@@ -403,11 +411,13 @@ struct kvm_vcpu_arch {
 	struct dtl *dtl;
 	struct dtl *dtl_end;
 
+	wait_queue_head_t *wqp;
 	struct kvmppc_vcore *vcore;
 	int ret;
 	int trap;
 	int state;
 	int ptid;
+	bool timer_running;
 	wait_queue_head_t cpu_run;
 
 	struct kvm_vcpu_arch_shared *shared;
@@ -423,8 +433,9 @@ struct kvm_vcpu_arch {
 #endif
 };
 
-#define KVMPPC_VCPU_BUSY_IN_HOST	0
-#define KVMPPC_VCPU_BLOCKED	1
+/* Values for vcpu->arch.state */
+#define KVMPPC_VCPU_STOPPED	0
+#define KVMPPC_VCPU_BUSY_IN_HOST	1
 #define KVMPPC_VCPU_RUNNABLE	2
 
 #endif /* __POWERPC_KVM_HOST_H__ */
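[Editor's note] The new `napping_threads` word and the `vcore_state` values support the IPI step described in the commit message: a thread that has ceded naps with its bit set, and code that needs to pull the whole core out of the guest must IPI every bit still set, because an HDEC interrupt will not wake a napping thread. In the patch this bookkeeping lives in real-mode assembly (book3s_hv_rmhandlers.S); the fragment below is only a hedged C rendering of that idea, with a hypothetical helper name.

	#include <linux/kvm_host.h>
	#include <linux/spinlock.h>

	/*
	 * Sketch only -- the real bookkeeping is done in real-mode assembly.
	 * A ceding thread publishes itself in napping_threads before napping,
	 * so the exit path knows which hardware threads need an IPI.
	 */
	static void kvmppc_note_napping_sketch(struct kvmppc_vcore *vc, int ptid)
	{
		unsigned long flags;

		spin_lock_irqsave(&vc->lock, flags);
		vc->napping_threads |= 1 << ptid;	/* visible to the exit path */
		spin_unlock_irqrestore(&vc->lock, flags);
	}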