author     Linus Torvalds <torvalds@linux-foundation.org>   2011-03-17 21:40:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-03-17 21:40:35 -0400
commit     ec0afc9311adcfb10b90e547c23250f63939f990 (patch)
tree       2093d2668898a8a03f30acbfd5568e65b8c086b9 /include/linux
parent     804f18536984939622ddca60ab6b25743e0ec68d (diff)
parent     776e58ea3d3735f85678155398241d2513afa67a (diff)
Merge branch 'kvm-updates/2.6.39' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.39' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (55 commits)
KVM: unbreak userspace that does not sets tss address
KVM: MMU: cleanup pte write path
KVM: MMU: introduce a common function to get no-dirty-logged slot
KVM: fix rcu usage in init_rmode_* functions
KVM: fix kvmclock regression due to missing clock update
KVM: emulator: Fix permission checking in io permission bitmap
KVM: emulator: Fix io permission checking for 64bit guest
KVM: SVM: Load %gs earlier if CONFIG_X86_32_LAZY_GS=n
KVM: x86: Remove useless regs_page pointer from kvm_lapic
KVM: improve comment on rcu use in irqfd_deassign
KVM: MMU: remove unused macros
KVM: MMU: cleanup page alloc and free
KVM: MMU: do not record gfn in kvm_mmu_pte_write
KVM: MMU: move mmu pages calculated out of mmu lock
KVM: MMU: set spte accessed bit properly
KVM: MMU: fix kvm_mmu_slot_remove_write_access dropping intermediate W bits
KVM: Start lock documentation
KVM: better readability of efer_reserved_bits
KVM: Clear async page fault hash after switching to real mode
KVM: VMX: Initialize vm86 TSS only once.
...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/kvm_host.h | 31
-rw-r--r--  include/linux/mm.h       | 13
2 files changed, 25 insertions, 19 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b5021db21858..ab428552af8e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -43,6 +43,7 @@
 #define KVM_REQ_DEACTIVATE_FPU 10
 #define KVM_REQ_EVENT 11
 #define KVM_REQ_APF_HALT 12
+#define KVM_REQ_NMI 13
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
 
@@ -98,23 +99,31 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
+enum {
+	OUTSIDE_GUEST_MODE,
+	IN_GUEST_MODE,
+	EXITING_GUEST_MODE
+};
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	struct preempt_notifier preempt_notifier;
 #endif
+	int cpu;
 	int vcpu_id;
-	struct mutex mutex;
-	int cpu;
-	atomic_t guest_mode;
-	struct kvm_run *run;
+	int srcu_idx;
+	int mode;
 	unsigned long requests;
 	unsigned long guest_debug;
-	int srcu_idx;
+
+	struct mutex mutex;
+	struct kvm_run *run;
 
 	int fpu_active;
 	int guest_fpu_loaded, guest_xcr0_loaded;
 	wait_queue_head_t wq;
+	struct pid *pid;
 	int sigset_active;
 	sigset_t sigset;
 	struct kvm_vcpu_stat stat;
@@ -140,6 +149,11 @@ struct kvm_vcpu {
 	struct kvm_vcpu_arch arch;
 };
 
+static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
+{
+	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
+}
+
 /*
  * Some of the bitops functions do not support too long bitmaps.
  * This number must be determined not to exceed such limits.
@@ -212,7 +226,6 @@ struct kvm_memslots {
 
 struct kvm {
 	spinlock_t mmu_lock;
-	raw_spinlock_t requests_lock;
 	struct mutex slots_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
 	struct kvm_memslots *memslots;
@@ -223,6 +236,7 @@ struct kvm {
 #endif
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	atomic_t online_vcpus;
+	int last_boosted_vcpu;
 	struct list_head vm_list;
 	struct mutex lock;
 	struct kvm_io_bus *buses[KVM_NR_BUSES];
@@ -719,11 +733,6 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 	set_bit(req, &vcpu->requests);
 }
 
-static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
-{
-	return test_and_set_bit(req, &vcpu->requests);
-}
-
 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 {
 	if (test_bit(req, &vcpu->requests)) {
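
A note on the kvm_host.h hunks above: the old atomic_t guest_mode is replaced by an int mode field driven by the new OUTSIDE_GUEST_MODE / IN_GUEST_MODE / EXITING_GUEST_MODE enum, and kvm_vcpu_exiting_guest_mode() uses cmpxchg() so the thread that flips a vCPU out of guest mode learns whether it was actually running guest code and therefore needs an IPI. Below is a minimal userspace sketch of that request-and-kick pattern, using GCC atomic builtins in place of the kernel's set_bit()/cmpxchg(); the helper names make_request(), request_and_kick() and send_ipi() are illustrative assumptions, not the KVM code that consumes these declarations.

/*
 * Userspace sketch of the request/kick pattern implied by the new
 * vcpu->mode field: a request bit is set, and the vCPU is only
 * "kicked" (sent an IPI) if it was actually in guest mode.  Stubs
 * stand in for the kernel primitives; this is not KVM itself.
 */
#include <stdio.h>

enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, EXITING_GUEST_MODE };

struct vcpu {
	int mode;
	unsigned long requests;
	int cpu;
};

#define KVM_REQ_EVENT 11	/* same value as in the hunk above */

static void send_ipi(int cpu)	/* stand-in for an IPI such as smp_send_reschedule() */
{
	printf("IPI to cpu %d\n", cpu);
}

static void make_request(int req, struct vcpu *v)
{
	__sync_fetch_and_or(&v->requests, 1UL << req);	/* plays the role of set_bit() */
}

static int exiting_guest_mode(struct vcpu *v)
{
	/* mirrors kvm_vcpu_exiting_guest_mode(): cmpxchg on the mode field */
	return __sync_val_compare_and_swap(&v->mode, IN_GUEST_MODE,
					   EXITING_GUEST_MODE);
}

static void request_and_kick(struct vcpu *v, int req)
{
	make_request(req, v);
	/* only interrupt the CPU if the vCPU was running guest code */
	if (exiting_guest_mode(v) == IN_GUEST_MODE)
		send_ipi(v->cpu);
}

int main(void)
{
	struct vcpu v = { .mode = IN_GUEST_MODE, .requests = 0, .cpu = 3 };

	request_and_kick(&v, KVM_REQ_EVENT);	/* kicks: vCPU was in guest mode */
	request_and_kick(&v, KVM_REQ_EVENT);	/* no kick: mode is now EXITING_GUEST_MODE */
	return 0;
}

The point of the cmpxchg return value is that at most one requester wins the transition out of IN_GUEST_MODE, so a vCPU is not IPI'd redundantly for every pending request bit.
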
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ff83798e1c27..581703d86fbd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -972,6 +972,10 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		     unsigned long start, int len, unsigned int foll_flags,
+		     struct page **pages, struct vm_area_struct **vmas,
+		     int *nonblocking);
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			unsigned long start, int nr_pages, int write, int force,
 			struct page **pages, struct vm_area_struct **vmas);
@@ -1535,6 +1539,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
 #define FOLL_MLOCK 0x40 /* mark page as mlocked */
 #define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
+#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
@@ -1627,14 +1632,6 @@ extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
 extern atomic_long_t mce_bad_pages;
 extern int soft_offline_page(struct page *page, int flags);
-#ifdef CONFIG_MEMORY_FAILURE
-int is_hwpoison_address(unsigned long addr);
-#else
-static inline int is_hwpoison_address(unsigned long addr)
-{
-	return 0;
-}
-#endif
 
 extern void dump_page(struct page *page);
 
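
On the mm.h side, the series exposes __get_user_pages() to other kernel code (now taking an explicit foll_flags word plus an optional nonblocking pointer) and adds FOLL_HWPOISON so a caller can ask for hwpoisoned pages to be reported as an error rather than handed back, replacing the removed is_hwpoison_address() check. The fragment below is a hedged, in-kernel sketch of such a caller and only builds inside a kernel tree of this vintage; pin_user_page_hwpoison_aware() is a made-up name, and passing FOLL_GET with a NULL nonblocking pointer is an assumption about typical usage, not a copy of the KVM call site.

/*
 * In-kernel sketch (not a standalone program): pin a single user page
 * while asking __get_user_pages() to fail on hwpoisoned memory instead
 * of returning it.  Error handling is deliberately simplified.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static struct page *pin_user_page_hwpoison_aware(unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct page *page;
	int ret;

	down_read(&mm->mmap_sem);
	/* one page, take a reference, report poisoned pages as an error */
	ret = __get_user_pages(current, mm, addr, 1,
			       FOLL_GET | FOLL_HWPOISON, &page, NULL, NULL);
	up_read(&mm->mmap_sem);

	/* on success the caller owns a reference and must put_page() it */
	return ret == 1 ? page : NULL;
}
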