author     Dave Hansen <dave@linux.vnet.ibm.com>    2008-08-11 13:01:49 -0400
committer  Avi Kivity <avi@qumranet.com>            2008-10-15 04:15:18 -0400
commit     6ad18fba05228fb1d47cdbc0339fe8b3fca1ca26
tree       4b64607dad75aa55dd397784d469d03244f0dfe7 /arch/x86/kvm/mmu.c
parent     b772ff362ec6b821c8a5227a3355e263f917bfad
KVM: Reduce stack usage in kvm_pv_mmu_op()
We're in a hot path.  We can't use kmalloc() because it might impact
performance.  So, we just stick the buffer that we need into the
kvm_vcpu_arch structure.  This is used very often, so it is not really
a waste.

We also have to move the buffer structure's definition to the
arch-specific x86 kvm header.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
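Note: the diffstat below is limited to arch/x86/kvm/mmu.c, so the header side
of the change is not shown here.  Going by the struct definition removed from
mmu.c and the new vcpu->arch.mmu_op_buffer reference in the diff, the
arch-specific header change plausibly looks like the following sketch; the
exact placement of the field inside struct kvm_vcpu_arch is an assumption.

/* include/asm-x86/kvm_host.h -- sketch only, not part of the diff shown below */

/* Definition moved verbatim out of arch/x86/kvm/mmu.c so the buffer can be
 * embedded per vcpu instead of occupying ~512 bytes of kernel stack. */
struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

struct kvm_vcpu_arch {
	/* ... existing fields ... */

	/* scratch space for kvm_pv_mmu_op(); referenced in mmu.c as
	 * vcpu->arch.mmu_op_buffer (placement within the struct is assumed) */
	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
};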
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--   arch/x86/kvm/mmu.c | 23 ++++++++---------------
1 file changed, 8 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c3afbfe6b0c1..171bcea1be21 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -135,13 +135,6 @@ module_param(dbg, bool, 0644);
 #define ACC_USER_MASK    PT_USER_MASK
 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
 
-struct kvm_pv_mmu_op_buffer {
-	void *ptr;
-	unsigned len;
-	unsigned processed;
-	char buf[512] __aligned(sizeof(long));
-};
-
 struct kvm_rmap_desc {
 	u64 *shadow_ptes[RMAP_EXT];
 	struct kvm_rmap_desc *more;
@@ -2292,18 +2285,18 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 		  gpa_t addr, unsigned long *ret)
 {
 	int r;
-	struct kvm_pv_mmu_op_buffer buffer;
+	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
 
-	buffer.ptr = buffer.buf;
-	buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
-	buffer.processed = 0;
+	buffer->ptr = buffer->buf;
+	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
+	buffer->processed = 0;
 
-	r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
+	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
 	if (r)
 		goto out;
 
-	while (buffer.len) {
-		r = kvm_pv_mmu_op_one(vcpu, &buffer);
+	while (buffer->len) {
+		r = kvm_pv_mmu_op_one(vcpu, buffer);
 		if (r < 0)
 			goto out;
 		if (r == 0)
@@ -2312,7 +2305,7 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 
 	r = 1;
 out:
-	*ret = buffer.processed;
+	*ret = buffer->processed;
 	return r;
 }
 