author      Ingo Molnar <mingo@elte.hu>   2008-07-21 11:19:50 -0400
committer   Ingo Molnar <mingo@elte.hu>   2008-07-21 11:19:50 -0400
commit      eb6a12c2428d21a9f3e0f1a50e927d5fd80fc3d0 (patch)
tree        5ac6f43899648abeab1d43aad3107f664e7f13d5 /virt
parent      c4762aba0b1f72659aae9ce37b772ca8bd8f06f4 (diff)
parent      14b395e35d1afdd8019d11b92e28041fad591b71 (diff)
Merge branch 'linus' into cpus4096-for-linus
Conflicts:
	net/sunrpc/svc.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'virt')

-rw-r--r--   virt/kvm/coalesced_mmio.c   156
-rw-r--r--   virt/kvm/coalesced_mmio.h    23
-rw-r--r--   virt/kvm/ioapic.c            23
-rw-r--r--   virt/kvm/iodev.h              8
-rw-r--r--   virt/kvm/kvm_main.c         153
-rw-r--r--   virt/kvm/kvm_trace.c         18

6 files changed, 346 insertions(+), 35 deletions(-)
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
new file mode 100644
index 000000000000..5ae620d32fac
--- /dev/null
+++ b/virt/kvm/coalesced_mmio.c
@@ -0,0 +1,156 @@
+/*
+ * KVM coalesced MMIO
+ *
+ * Copyright (c) 2008 Bull S.A.S.
+ *
+ *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
+ *
+ */
+
+#include "iodev.h"
+
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+
+#include "coalesced_mmio.h"
+
+static int coalesced_mmio_in_range(struct kvm_io_device *this,
+                                   gpa_t addr, int len, int is_write)
+{
+        struct kvm_coalesced_mmio_dev *dev =
+                                (struct kvm_coalesced_mmio_dev*)this->private;
+        struct kvm_coalesced_mmio_zone *zone;
+        int next;
+        int i;
+
+        if (!is_write)
+                return 0;
+
+        /* kvm->lock is taken by the caller and must be not released before
+         * dev.read/write
+         */
+
+        /* Are we able to batch it ? */
+
+        /* last is the first free entry
+         * check if we don't meet the first used entry
+         * there is always one unused entry in the buffer
+         */
+
+        next = (dev->kvm->coalesced_mmio_ring->last + 1) %
+                                                        KVM_COALESCED_MMIO_MAX;
+        if (next == dev->kvm->coalesced_mmio_ring->first) {
+                /* full */
+                return 0;
+        }
+
+        /* is it in a batchable area ? */
+
+        for (i = 0; i < dev->nb_zones; i++) {
+                zone = &dev->zone[i];
+
+                /* (addr,len) is fully included in
+                 * (zone->addr, zone->size)
+                 */
+
+                if (zone->addr <= addr &&
+                    addr + len <= zone->addr + zone->size)
+                        return 1;
+        }
+        return 0;
+}
+
+static void coalesced_mmio_write(struct kvm_io_device *this,
+                                 gpa_t addr, int len, const void *val)
+{
+        struct kvm_coalesced_mmio_dev *dev =
+                                (struct kvm_coalesced_mmio_dev*)this->private;
+        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+
+        /* kvm->lock must be taken by caller before call to in_range()*/
+
+        /* copy data in first free entry of the ring */
+
+        ring->coalesced_mmio[ring->last].phys_addr = addr;
+        ring->coalesced_mmio[ring->last].len = len;
+        memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+        smp_wmb();
+        ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+}
+
+static void coalesced_mmio_destructor(struct kvm_io_device *this)
+{
+        kfree(this);
+}
+
+int kvm_coalesced_mmio_init(struct kvm *kvm)
+{
+        struct kvm_coalesced_mmio_dev *dev;
+
+        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+        if (!dev)
+                return -ENOMEM;
+        dev->dev.write = coalesced_mmio_write;
+        dev->dev.in_range = coalesced_mmio_in_range;
+        dev->dev.destructor = coalesced_mmio_destructor;
+        dev->dev.private = dev;
+        dev->kvm = kvm;
+        kvm->coalesced_mmio_dev = dev;
+        kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);
+
+        return 0;
+}
+
+int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
+                                         struct kvm_coalesced_mmio_zone *zone)
+{
+        struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+
+        if (dev == NULL)
+                return -EINVAL;
+
+        mutex_lock(&kvm->lock);
+        if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
+                mutex_unlock(&kvm->lock);
+                return -ENOBUFS;
+        }
+
+        dev->zone[dev->nb_zones] = *zone;
+        dev->nb_zones++;
+
+        mutex_unlock(&kvm->lock);
+        return 0;
+}
+
+int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
+                                           struct kvm_coalesced_mmio_zone *zone)
+{
+        int i;
+        struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+        struct kvm_coalesced_mmio_zone *z;
+
+        if (dev == NULL)
+                return -EINVAL;
+
+        mutex_lock(&kvm->lock);
+
+        i = dev->nb_zones;
+        while(i) {
+                z = &dev->zone[i - 1];
+
+                /* unregister all zones
+                 * included in (zone->addr, zone->size)
+                 */
+
+                if (zone->addr <= z->addr &&
+                        z->addr + z->size <= zone->addr + zone->size) {
+                        dev->nb_zones--;
+                        *z = dev->zone[dev->nb_zones];
+                }
+                i--;
+        }
+
+        mutex_unlock(&kvm->lock);
+
+        return 0;
+}
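The new file is only the producer side of the ring: vcpu threads append write records under kvm->lock, and the smp_wmb() publishes each entry before the `last` index moves. For orientation, here is a minimal sketch of the matching userspace consumer, assuming the kvm_coalesced_mmio_ring and kvm_coalesced_mmio layouts added to include/linux/kvm.h by the same series; handle_mmio_write() is a hypothetical VMM callback, and the 4096 page size is an assumption that must match the kernel's.

#include <stdint.h>
#include <linux/kvm.h>

/* Assumed to match the kernel's KVM_COALESCED_MMIO_MAX; the 4096 here
 * stands in for the kernel page size. */
#define COALESCED_MMIO_MAX \
        ((4096 - sizeof(struct kvm_coalesced_mmio_ring)) / \
         sizeof(struct kvm_coalesced_mmio))

/* Hypothetical VMM callback that replays one guest MMIO write. */
void handle_mmio_write(uint64_t addr, const void *data, uint32_t len);

static void drain_coalesced_mmio(struct kvm_coalesced_mmio_ring *ring)
{
        /* first == last means empty; the kernel leaves one slot unused
         * so that (last + 1) % MAX == first means full. */
        while (ring->first != ring->last) {
                struct kvm_coalesced_mmio *ent =
                        &ring->coalesced_mmio[ring->first];

                handle_mmio_write(ent->phys_addr, ent->data, ent->len);
                /* finish reading the entry before recycling the slot,
                 * mirroring the producer's smp_wmb() */
                __sync_synchronize();
                ring->first = (ring->first + 1) % COALESCED_MMIO_MAX;
        }
}

Userspace would drain the ring on each KVM_RUN exit; a full ring simply stops coalescing (in_range() returns 0 above) rather than blocking the guest.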
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
new file mode 100644
index 000000000000..5ac0ec628461
--- /dev/null
+++ b/virt/kvm/coalesced_mmio.h
@@ -0,0 +1,23 @@
+/*
+ * KVM coalesced MMIO
+ *
+ * Copyright (c) 2008 Bull S.A.S.
+ *
+ *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
+ *
+ */
+
+#define KVM_COALESCED_MMIO_ZONE_MAX 100
+
+struct kvm_coalesced_mmio_dev {
+        struct kvm_io_device dev;
+        struct kvm *kvm;
+        int nb_zones;
+        struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
+};
+
+int kvm_coalesced_mmio_init(struct kvm *kvm);
+int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
+                                        struct kvm_coalesced_mmio_zone *zone);
+int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
+                                        struct kvm_coalesced_mmio_zone *zone);
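A VM can register up to KVM_COALESCED_MMIO_ZONE_MAX zones through the ioctls declared here. A hedged sketch of the userspace call, assuming the kvm_coalesced_mmio_zone layout from the matching include/linux/kvm.h change; vm_fd and the legacy VGA window chosen here are purely illustrative:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int coalesce_vga_window(int vm_fd)
{
        struct kvm_coalesced_mmio_zone zone = {
                .addr = 0xa0000,        /* example: write-heavy framebuffer */
                .size = 0x20000,
        };

        /* -ENOBUFS once all 100 zone slots are used, -EINVAL when the
         * kernel has no coalesced MMIO device */
        return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

Only regions whose writes are side-effect free should be registered, because the guest continues running before the write reaches the device model.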
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 44589088941f..c0d22870ee9c 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -146,6 +146,11 @@ static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
         return kvm_apic_set_irq(vcpu, vector, trig_mode);
 }
 
+static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
+{
+        kvm_inject_nmi(vcpu);
+}
+
 static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
                                        u8 dest_mode)
 {
@@ -239,8 +244,19 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
                         }
                 }
                 break;
-
-                /* TODO: NMI */
+        case IOAPIC_NMI:
+                for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
+                        if (!(deliver_bitmask & (1 << vcpu_id)))
+                                continue;
+                        deliver_bitmask &= ~(1 << vcpu_id);
+                        vcpu = ioapic->kvm->vcpus[vcpu_id];
+                        if (vcpu)
+                                ioapic_inj_nmi(vcpu);
+                        else
+                                ioapic_debug("NMI to vcpu %d failed\n",
+                                                vcpu->vcpu_id);
+                }
+                break;
         default:
                 printk(KERN_WARNING "Unsupported delivery mode %d\n",
                        delivery_mode);
@@ -291,7 +307,8 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector)
                 __kvm_ioapic_update_eoi(ioapic, i);
 }
 
-static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr)
+static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr,
+                           int len, int is_write)
 {
         struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
 
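The new IOAPIC_NMI arm is taken when the guest programs a redirection entry whose delivery mode field holds 100b; the vector is then ignored and every vcpu in the delivery bitmask gets kvm_inject_nmi(). For reference, a sketch of how such an entry is laid out. The bit positions follow the 82093AA I/O APIC datasheet, not anything introduced by this patch:

#include <stdint.h>

#define IOAPIC_DM_NMI   0x4     /* delivery mode field, bits 10:8 */

/* Build a redirection entry that sends an NMI to one CPU, identified
 * by physical APIC ID in the destination field (bits 63:56).  The
 * vector bits 7:0 are ignored for NMI delivery. */
static inline uint64_t ioapic_redir_entry_nmi(uint8_t dest_apic_id)
{
        return ((uint64_t)IOAPIC_DM_NMI << 8) |
               ((uint64_t)dest_apic_id << 56);
}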
diff --git a/virt/kvm/iodev.h b/virt/kvm/iodev.h
index c14e642027b2..55e8846ac3a6 100644
--- a/virt/kvm/iodev.h
+++ b/virt/kvm/iodev.h
@@ -27,7 +27,8 @@ struct kvm_io_device {
                       gpa_t addr,
                       int len,
                       const void *val);
-        int (*in_range)(struct kvm_io_device *this, gpa_t addr);
+        int (*in_range)(struct kvm_io_device *this, gpa_t addr, int len,
+                        int is_write);
         void (*destructor)(struct kvm_io_device *this);
 
         void *private;
@@ -49,9 +50,10 @@ static inline void kvm_iodevice_write(struct kvm_io_device *dev,
         dev->write(dev, addr, len, val);
 }
 
-static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
+static inline int kvm_iodevice_inrange(struct kvm_io_device *dev,
+                                       gpa_t addr, int len, int is_write)
 {
-        return dev->in_range(dev, addr);
+        return dev->in_range(dev, addr, len, is_write);
 }
 
 static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
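The widened hook lets a device claim an access based on its length and direction, not just its address; coalesced_mmio_in_range() earlier in this patch relies on exactly that to batch only writes falling wholly inside a registered zone. A minimal sketch of a device implementing the new signature, using only the declarations in this header; the mydev naming is hypothetical:

struct mydev {
        struct kvm_io_device dev;
        gpa_t base;
        int size;
};

static int mydev_in_range(struct kvm_io_device *this, gpa_t addr,
                          int len, int is_write)
{
        struct mydev *m = (struct mydev *)this->private;

        if (!is_write)
                return 0;       /* claim writes only */
        /* accept only accesses fully inside [base, base + size) */
        return m->base <= addr && addr + len <= m->base + m->size;
}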
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d4eae6af0738..904d7b7bd780 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -47,6 +47,10 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+#include "coalesced_mmio.h"
+#endif
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -65,6 +69,8 @@ struct dentry *kvm_debugfs_dir;
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                            unsigned long arg);
 
+bool kvm_rebooting;
+
 static inline int valid_vcpu(int n)
 {
         return likely(n >= 0 && n < KVM_MAX_VCPUS);
@@ -99,10 +105,11 @@ static void ack_flush(void *_completed)
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-        int i, cpu;
+        int i, cpu, me;
         cpumask_t cpus;
         struct kvm_vcpu *vcpu;
 
+        me = get_cpu();
         cpus_clear(cpus);
         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                 vcpu = kvm->vcpus[i];
@@ -111,21 +118,24 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
                 if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                         continue;
                 cpu = vcpu->cpu;
-                if (cpu != -1 && cpu != raw_smp_processor_id())
+                if (cpu != -1 && cpu != me)
                         cpu_set(cpu, cpus);
         }
         if (cpus_empty(cpus))
-                return;
+                goto out;
         ++kvm->stat.remote_tlb_flush;
         smp_call_function_mask(cpus, ack_flush, NULL, 1);
+out:
+        put_cpu();
 }
 
 void kvm_reload_remote_mmus(struct kvm *kvm)
 {
-        int i, cpu;
+        int i, cpu, me;
         cpumask_t cpus;
         struct kvm_vcpu *vcpu;
 
+        me = get_cpu();
         cpus_clear(cpus);
         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                 vcpu = kvm->vcpus[i];
@@ -134,12 +144,14 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
                 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                         continue;
                 cpu = vcpu->cpu;
-                if (cpu != -1 && cpu != raw_smp_processor_id())
+                if (cpu != -1 && cpu != me)
                         cpu_set(cpu, cpus);
         }
         if (cpus_empty(cpus))
-                return;
+                goto out;
         smp_call_function_mask(cpus, ack_flush, NULL, 1);
+out:
+        put_cpu();
 }
 
 
@@ -183,10 +195,23 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
 static struct kvm *kvm_create_vm(void)
 {
         struct kvm *kvm = kvm_arch_create_vm();
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+        struct page *page;
+#endif
 
         if (IS_ERR(kvm))
                 goto out;
 
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+        if (!page) {
+                kfree(kvm);
+                return ERR_PTR(-ENOMEM);
+        }
+        kvm->coalesced_mmio_ring =
+                        (struct kvm_coalesced_mmio_ring *)page_address(page);
+#endif
+
         kvm->mm = current->mm;
         atomic_inc(&kvm->mm->mm_count);
         spin_lock_init(&kvm->mmu_lock);
@@ -198,6 +223,9 @@ static struct kvm *kvm_create_vm(void)
         spin_lock(&kvm_lock);
         list_add(&kvm->vm_list, &vm_list);
         spin_unlock(&kvm_lock);
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+        kvm_coalesced_mmio_init(kvm);
+#endif
 out:
         return kvm;
 }
@@ -240,6 +268,10 @@ static void kvm_destroy_vm(struct kvm *kvm)
         spin_unlock(&kvm_lock);
         kvm_io_bus_destroy(&kvm->pio_bus);
         kvm_io_bus_destroy(&kvm->mmio_bus);
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+        if (kvm->coalesced_mmio_ring != NULL)
+                free_page((unsigned long)kvm->coalesced_mmio_ring);
+#endif
         kvm_arch_destroy_vm(kvm);
         mmdrop(mm);
 }
@@ -333,6 +365,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         r = -ENOMEM;
 
         /* Allocate if a slot is being created */
+#ifndef CONFIG_S390
         if (npages && !new.rmap) {
                 new.rmap = vmalloc(npages * sizeof(struct page *));
 
@@ -373,10 +406,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
                         goto out_free;
                 memset(new.dirty_bitmap, 0, dirty_bytes);
         }
+#endif /* not defined CONFIG_S390 */
 
         if (mem->slot >= kvm->nmemslots)
                 kvm->nmemslots = mem->slot + 1;
 
+        if (!npages)
+                kvm_arch_flush_shadow(kvm);
+
         *memslot = new;
 
         r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
@@ -532,6 +569,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
         struct page *page[1];
         unsigned long addr;
         int npages;
+        pfn_t pfn;
 
         might_sleep();
 
@@ -544,19 +582,38 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
         npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
                                 NULL);
 
-        if (npages != 1) {
-                get_page(bad_page);
-                return page_to_pfn(bad_page);
-        }
+        if (unlikely(npages != 1)) {
+                struct vm_area_struct *vma;
 
-        return page_to_pfn(page[0]);
+                vma = find_vma(current->mm, addr);
+                if (vma == NULL || addr < vma->vm_start ||
+                    !(vma->vm_flags & VM_PFNMAP)) {
+                        get_page(bad_page);
+                        return page_to_pfn(bad_page);
+                }
+
+                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+                BUG_ON(pfn_valid(pfn));
+        } else
+                pfn = page_to_pfn(page[0]);
+
+        return pfn;
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
-        return pfn_to_page(gfn_to_pfn(kvm, gfn));
+        pfn_t pfn;
+
+        pfn = gfn_to_pfn(kvm, gfn);
+        if (pfn_valid(pfn))
+                return pfn_to_page(pfn);
+
+        WARN_ON(!pfn_valid(pfn));
+
+        get_page(bad_page);
+        return bad_page;
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
@@ -569,7 +626,8 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-        put_page(pfn_to_page(pfn));
+        if (pfn_valid(pfn))
+                put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
 
@@ -594,21 +652,25 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-        struct page *page = pfn_to_page(pfn);
-        if (!PageReserved(page))
-                SetPageDirty(page);
+        if (pfn_valid(pfn)) {
+                struct page *page = pfn_to_page(pfn);
+                if (!PageReserved(page))
+                        SetPageDirty(page);
+        }
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-        mark_page_accessed(pfn_to_page(pfn));
+        if (pfn_valid(pfn))
+                mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-        get_page(pfn_to_page(pfn));
+        if (pfn_valid(pfn))
+                get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
 
@@ -799,6 +861,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                 page = virt_to_page(vcpu->arch.pio_data);
 #endif
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
+                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
+#endif
         else
                 return VM_FAULT_SIGBUS;
         get_page(page);
@@ -1121,6 +1187,32 @@ static long kvm_vm_ioctl(struct file *filp,
                         goto out;
                 break;
         }
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+        case KVM_REGISTER_COALESCED_MMIO: {
+                struct kvm_coalesced_mmio_zone zone;
+                r = -EFAULT;
+                if (copy_from_user(&zone, argp, sizeof zone))
+                        goto out;
+                r = -ENXIO;
+                r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
+                if (r)
+                        goto out;
+                r = 0;
+                break;
+        }
+        case KVM_UNREGISTER_COALESCED_MMIO: {
+                struct kvm_coalesced_mmio_zone zone;
+                r = -EFAULT;
+                if (copy_from_user(&zone, argp, sizeof zone))
+                        goto out;
+                r = -ENXIO;
+                r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
+                if (r)
+                        goto out;
+                r = 0;
+                break;
+        }
+#endif
         default:
                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
         }
@@ -1179,7 +1271,6 @@ static int kvm_dev_ioctl_create_vm(void)
 static long kvm_dev_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
-        void __user *argp = (void __user *)arg;
         long r = -EINVAL;
 
         switch (ioctl) {
@@ -1196,7 +1287,7 @@ static long kvm_dev_ioctl(struct file *filp,
                 r = kvm_dev_ioctl_create_vm();
                 break;
         case KVM_CHECK_EXTENSION:
-                r = kvm_dev_ioctl_check_extension((long)argp);
+                r = kvm_dev_ioctl_check_extension(arg);
                 break;
         case KVM_GET_VCPU_MMAP_SIZE:
                 r = -EINVAL;
@@ -1206,6 +1297,9 @@ static long kvm_dev_ioctl(struct file *filp,
 #ifdef CONFIG_X86
                 r += PAGE_SIZE;    /* pio data page */
 #endif
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+                r += PAGE_SIZE;    /* coalesced mmio ring page */
+#endif
                 break;
         case KVM_TRACE_ENABLE:
         case KVM_TRACE_PAUSE:
@@ -1247,7 +1341,6 @@ static void hardware_disable(void *junk)
         if (!cpu_isset(cpu, cpus_hardware_enabled))
                 return;
         cpu_clear(cpu, cpus_hardware_enabled);
-        decache_vcpus_on_cpu(cpu);
         kvm_arch_hardware_disable(NULL);
 }
 
@@ -1277,6 +1370,18 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
         return NOTIFY_OK;
 }
 
+
+asmlinkage void kvm_handle_fault_on_reboot(void)
+{
+        if (kvm_rebooting)
+                /* spin while reset goes on */
+                while (true)
+                        ;
+        /* Fault while not rebooting.  We want the trace. */
+        BUG();
+}
+EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
+
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                       void *v)
 {
@@ -1286,6 +1391,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
          * in vmx root mode.
          */
         printk(KERN_INFO "kvm: exiting hardware virtualization\n");
+        kvm_rebooting = true;
         on_each_cpu(hardware_disable, NULL, 1);
         }
         return NOTIFY_OK;
@@ -1312,14 +1418,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
         }
 }
 
-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
+                                          gpa_t addr, int len, int is_write)
 {
         int i;
 
         for (i = 0; i < bus->dev_count; i++) {
                 struct kvm_io_device *pos = bus->devs[i];
 
-                if (pos->in_range(pos, addr))
+                if (pos->in_range(pos, addr, len, is_write))
                         return pos;
         }
 
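The kvm_vcpu_fault and KVM_GET_VCPU_MMAP_SIZE hunks together expose the ring as one extra page of the existing vcpu mapping. A sketch of the userspace side, assuming KVM_COALESCED_MMIO_PAGE_OFFSET is visible through <linux/kvm.h> for the target architecture (2 on x86, after the kvm_run page and the pio data page); kvm_fd and vcpu_fd are illustrative:

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

static struct kvm_coalesced_mmio_ring *
map_coalesced_ring(int kvm_fd, int vcpu_fd)
{
        long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        char *run;

        if (size < 0)
                return NULL;
        run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   vcpu_fd, 0);
        if (run == MAP_FAILED)
                return NULL;
        /* the ring page sits a fixed number of pages into the vcpu
         * mapping, per the fault handler above */
        return (struct kvm_coalesced_mmio_ring *)
                (run + KVM_COALESCED_MMIO_PAGE_OFFSET * getpagesize());
}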
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c
index 0e495470788d..58141f31ea8f 100644
--- a/virt/kvm/kvm_trace.c
+++ b/virt/kvm/kvm_trace.c
@@ -72,11 +72,7 @@ static void kvm_add_trace(void *probe_private, void *call_data,
         rec.cycle_in    = p->cycle_in;
 
         if (rec.cycle_in) {
-                u64 cycle = 0;
-
-                cycle = get_cycles();
-                rec.u.cycle.cycle_lo = (u32)cycle;
-                rec.u.cycle.cycle_hi = (u32)(cycle >> 32);
+                rec.u.cycle.cycle_u64 = get_cycles();
 
                 for (i = 0; i < rec.extra_u32; i++)
                         rec.u.cycle.extra_u32[i] = va_arg(*args, u32);
@@ -114,8 +110,18 @@ static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 {
         struct kvm_trace *kt;
 
-        if (!relay_buf_full(buf))
+        if (!relay_buf_full(buf)) {
+                if (!prev_subbuf) {
+                        /*
+                         * executed only once when the channel is opened
+                         * save metadata as first record
+                         */
+                        subbuf_start_reserve(buf, sizeof(u32));
+                        *(u32 *)subbuf = 0x12345678;
+                }
+
                 return 1;
+        }
 
         kt = buf->chan->private_data;
         atomic_inc(&kt->lost_records);
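The u32 reserved at the head of the first sub-buffer gives trace decoders a fixed marker to look for. A hedged sketch of a consumer-side check; the helper is hypothetical and only the 0x12345678 value comes from the patch:

#include <stdint.h>
#include <string.h>

/* Returns 0 for a native-order trace, 1 for a byte-swapped one, and
 * -1 when the marker is missing entirely. */
static int kvmtrace_foreign_endian(const void *first_subbuf)
{
        uint32_t magic;

        memcpy(&magic, first_subbuf, sizeof(magic));
        if (magic == 0x12345678)
                return 0;
        if (magic == 0x78563412)
                return 1;
        return -1;
}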