author		Glauber de Oliveira Costa <gcosta@redhat.com>	2008-01-07 08:05:37 -0500
committer	Rusty Russell <rusty@rustcorp.com.au>	2008-01-30 06:50:14 -0500
commit		1713608f280002d9ffc6de89d7de5cf367072d63
tree		332e7bdbe7ccccad408b309a4dd00b706b04082f /drivers/lguest
parent		5e232f4f428c4266ba5cdae9f23ba19a0913dcf9
lguest: per-vcpu lguest pgdir management
This patch makes pgdir management per-vcpu. The pgdirs pool is still guest-wide (although it will probably need to grow once we are really executing more vcpus), but the guest-wide pgdidx index is gone, since it no longer makes sense. Instead, each vcpu uses its own index.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
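[Annotation, not part of the patch] The diff below is mostly mechanical, so here is a minimal userspace C sketch of the underlying data-structure change: the per-guest pgdirs[] pool stays in struct lguest, while the "which pgdir am I using" index moves into struct lg_cpu as cpu_pgd. Field names (cpu_pgd, pgdirs, gpgdir, cpus) match the patch; the simplified pgd_t, the array sizes, current_pgdir() and main() are invented for the demo only.

/*
 * Illustrative sketch only: simplified stand-in types showing how the
 * current-pgdir index moves from the guest-wide struct lguest into each
 * struct lg_cpu.  Compile with any C99 compiler; not kernel code.
 */
#include <stdio.h>

typedef unsigned long pgd_t;            /* stand-in for the real pgd_t */

struct pgdir {
	unsigned long gpgdir;           /* guest address of the guest pgd */
	pgd_t *pgdir;                   /* shadow page directory */
};

struct lguest;                          /* forward declaration */

struct lg_cpu {
	struct lguest *lg;              /* back-pointer to the guest */
	int cpu_pgd;                    /* which pgdirs[] slot this vcpu uses */
};

struct lguest {
	/* The pool stays guest-wide; only the index became per-vcpu. */
	struct pgdir pgdirs[4];
	struct lg_cpu cpus[2];          /* array size is arbitrary here */
};

/* After the patch, lookups go through the vcpu, not a global pgdidx. */
static struct pgdir *current_pgdir(struct lg_cpu *cpu)
{
	return &cpu->lg->pgdirs[cpu->cpu_pgd];
}

int main(void)
{
	struct lguest lg = { .pgdirs = { { .gpgdir = 0x1000 } } };

	lg.cpus[0].lg = &lg;
	lg.cpus[0].cpu_pgd = 0;         /* mirrors init_guest_pagetable() */

	printf("vcpu0 gpgdir: %#lx\n", current_pgdir(&lg.cpus[0])->gpgdir);
	return 0;
}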
Diffstat (limited to 'drivers/lguest')
 -rw-r--r--  drivers/lguest/hypercalls.c            |  2
 -rw-r--r--  drivers/lguest/interrupts_and_traps.c  |  6
 -rw-r--r--  drivers/lguest/lg.h                    | 12
 -rw-r--r--  drivers/lguest/page_tables.c           | 59
 -rw-r--r--  drivers/lguest/x86/core.c              |  7
 5 files changed, 44 insertions(+), 42 deletions(-)
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index be8f04685767..0471018d700d 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -62,7 +62,7 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 		if (args->arg1)
 			guest_pagetable_clear_all(cpu);
 		else
-			guest_pagetable_flush_user(lg);
+			guest_pagetable_flush_user(cpu);
 		break;
 
 	/* All these calls simply pass the arguments through to the right
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index b87d9d6c36a4..6bbfce4e5987 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -76,7 +76,7 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
 		virtstack = cpu->esp1;
 		ss = cpu->ss1;
 
-		origstack = gstack = guest_pa(lg, virtstack);
+		origstack = gstack = guest_pa(cpu, virtstack);
 		/* We push the old stack segment and pointer onto the new
 		 * stack: when the Guest does an "iret" back from the interrupt
 		 * handler the CPU will notice they're dropping privilege
@@ -88,7 +88,7 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
 		virtstack = cpu->regs->esp;
 		ss = cpu->regs->ss;
 
-		origstack = gstack = guest_pa(lg, virtstack);
+		origstack = gstack = guest_pa(cpu, virtstack);
 	}
 
 	/* Remember that we never let the Guest actually disable interrupts, so
@@ -323,7 +323,7 @@ void pin_stack_pages(struct lg_cpu *cpu)
 		 * start of the page after the kernel stack.  Subtract one to
 		 * get back onto the first stack page, and keep subtracting to
 		 * get to the rest of the stack pages. */
-		pin_page(lg, cpu->esp1 - 1 - i * PAGE_SIZE);
+		pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
 }
 
 /* Direct traps also mean that we need to know whenever the Guest wants to use
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 95b473cdd0e0..94e518da9aa8 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -57,6 +57,8 @@ struct lg_cpu {
 	unsigned long regs_page;
 	struct lguest_regs *regs;
 
+	int cpu_pgd; /* which pgd this cpu is currently using */
+
 	/* If a hypercall was asked for, this points to the arguments. */
 	struct hcall_args *hcall;
 	u32 next_hcall;
@@ -92,8 +94,6 @@ struct lguest
 	int changed;
 	struct lguest_pages *last_pages;
 
-	/* We keep a small number of these. */
-	u32 pgdidx;
 	struct pgdir pgdirs[4];
 
 	unsigned long noirq_start, noirq_end;
@@ -169,13 +169,13 @@ void free_guest_pagetable(struct lguest *lg);
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
 void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
 void guest_pagetable_clear_all(struct lg_cpu *cpu);
-void guest_pagetable_flush_user(struct lguest *lg);
+void guest_pagetable_flush_user(struct lg_cpu *cpu);
 void guest_set_pte(struct lguest *lg, unsigned long gpgdir,
 		   unsigned long vaddr, pte_t val);
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
-int demand_page(struct lguest *info, unsigned long cr2, int errcode);
-void pin_page(struct lguest *lg, unsigned long vaddr);
-unsigned long guest_pa(struct lguest *lg, unsigned long vaddr);
+int demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode);
+void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
+unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
 void page_table_guest_data_init(struct lguest *lg);
 
 /* <arch>/core.c: */
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index e34c81636a8c..fb665611ccc2 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -94,10 +94,10 @@ static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr)
 
 /* These two functions just like the above two, except they access the Guest
  * page tables.  Hence they return a Guest address. */
-static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
+static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
 {
 	unsigned int index = vaddr >> (PGDIR_SHIFT);
-	return lg->pgdirs[lg->pgdidx].gpgdir + index * sizeof(pgd_t);
+	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
 }
 
 static unsigned long gpte_addr(struct lguest *lg,
@@ -200,22 +200,23 @@ static void check_gpgd(struct lguest *lg, pgd_t gpgd)
  *
  * If we fixed up the fault (ie. we mapped the address), this routine returns
  * true.  Otherwise, it was a real fault and we need to tell the Guest. */
-int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
+int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 {
 	pgd_t gpgd;
 	pgd_t *spgd;
 	unsigned long gpte_ptr;
 	pte_t gpte;
 	pte_t *spte;
+	struct lguest *lg = cpu->lg;
 
 	/* First step: get the top-level Guest page table entry. */
-	gpgd = lgread(lg, gpgd_addr(lg, vaddr), pgd_t);
+	gpgd = lgread(lg, gpgd_addr(cpu, vaddr), pgd_t);
 	/* Toplevel not present?  We can't map it in. */
 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
 		return 0;
 
 	/* Now look at the matching shadow entry. */
-	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
+	spgd = spgd_addr(lg, cpu->cpu_pgd, vaddr);
 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
 		/* No shadow entry: allocate a new shadow PTE page. */
 		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
@@ -297,19 +298,19 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
  *
  * This is a quick version which answers the question: is this virtual address
  * mapped by the shadow page tables, and is it writable? */
-static int page_writable(struct lguest *lg, unsigned long vaddr)
+static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 {
 	pgd_t *spgd;
 	unsigned long flags;
 
 	/* Look at the current top level entry: is it present? */
-	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
+	spgd = spgd_addr(cpu->lg, cpu->cpu_pgd, vaddr);
 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
 		return 0;
 
 	/* Check the flags on the pte entry itself: it must be present and
 	 * writable. */
-	flags = pte_flags(*(spte_addr(lg, *spgd, vaddr)));
+	flags = pte_flags(*(spte_addr(cpu->lg, *spgd, vaddr)));
 
 	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
 }
@@ -317,10 +318,10 @@ static int page_writable(struct lguest *lg, unsigned long vaddr)
 /* So, when pin_stack_pages() asks us to pin a page, we check if it's already
  * in the page tables, and if not, we call demand_page() with error code 2
  * (meaning "write"). */
-void pin_page(struct lguest *lg, unsigned long vaddr)
+void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 {
-	if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
-		kill_guest(lg, "bad stack page %#lx", vaddr);
+	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
+		kill_guest(cpu->lg, "bad stack page %#lx", vaddr);
 }
 
 /*H:450 If we chase down the release_pgd() code, it looks like this: */
@@ -358,28 +359,28 @@ static void flush_user_mappings(struct lguest *lg, int idx)
  *
  * The Guest has a hypercall to throw away the page tables: it's used when a
  * large number of mappings have been changed. */
-void guest_pagetable_flush_user(struct lguest *lg)
+void guest_pagetable_flush_user(struct lg_cpu *cpu)
 {
 	/* Drop the userspace part of the current page table. */
-	flush_user_mappings(lg, lg->pgdidx);
+	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
 }
 /*:*/
 
 /* We walk down the guest page tables to get a guest-physical address */
-unsigned long guest_pa(struct lguest *lg, unsigned long vaddr)
+unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
 {
 	pgd_t gpgd;
 	pte_t gpte;
 
 	/* First step: get the top-level Guest page table entry. */
-	gpgd = lgread(lg, gpgd_addr(lg, vaddr), pgd_t);
+	gpgd = lgread(cpu->lg, gpgd_addr(cpu, vaddr), pgd_t);
 	/* Toplevel not present?  We can't map it in. */
 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
-		kill_guest(lg, "Bad address %#lx", vaddr);
+		kill_guest(cpu->lg, "Bad address %#lx", vaddr);
 
-	gpte = lgread(lg, gpte_addr(lg, gpgd, vaddr), pte_t);
+	gpte = lgread(cpu->lg, gpte_addr(cpu->lg, gpgd, vaddr), pte_t);
 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
-		kill_guest(lg, "Bad address %#lx", vaddr);
+		kill_guest(cpu->lg, "Bad address %#lx", vaddr);
 
 	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
 }
@@ -399,11 +400,12 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
 /*H:435 And this is us, creating the new page directory.  If we really do
  * allocate a new one (and so the kernel parts are not there), we set
  * blank_pgdir. */
-static unsigned int new_pgdir(struct lguest *lg,
+static unsigned int new_pgdir(struct lg_cpu *cpu,
 			      unsigned long gpgdir,
 			      int *blank_pgdir)
 {
 	unsigned int next;
+	struct lguest *lg = cpu->lg;
 
 	/* We pick one entry at random to throw out.  Choosing the Least
 	 * Recently Used might be better, but this is easy. */
@@ -413,7 +415,7 @@ static unsigned int new_pgdir(struct lguest *lg,
 	lg->pgdirs[next].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
 	/* If the allocation fails, just keep using the one we have */
 	if (!lg->pgdirs[next].pgdir)
-		next = lg->pgdidx;
+		next = cpu->cpu_pgd;
 	else
 		/* This is a blank page, so there are no kernel
 		 * mappings: caller must map the stack! */
@@ -442,9 +444,9 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
 	/* If not, we allocate or mug an existing one: if it's a fresh one,
 	 * repin gets set to 1. */
 	if (newpgdir == ARRAY_SIZE(lg->pgdirs))
-		newpgdir = new_pgdir(lg, pgtable, &repin);
+		newpgdir = new_pgdir(cpu, pgtable, &repin);
 	/* Change the current pgd index to the new one. */
-	lg->pgdidx = newpgdir;
+	cpu->cpu_pgd = newpgdir;
 	/* If it was completely blank, we map in the Guest kernel stack */
 	if (repin)
 		pin_stack_pages(cpu);
@@ -591,11 +593,11 @@ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
 {
 	/* We start on the first shadow page table, and give it a blank PGD
 	 * page. */
-	lg->pgdidx = 0;
-	lg->pgdirs[lg->pgdidx].gpgdir = pgtable;
-	lg->pgdirs[lg->pgdidx].pgdir = (pgd_t*)get_zeroed_page(GFP_KERNEL);
-	if (!lg->pgdirs[lg->pgdidx].pgdir)
+	lg->pgdirs[0].gpgdir = pgtable;
+	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+	if (!lg->pgdirs[0].pgdir)
 		return -ENOMEM;
+	lg->cpus[0].cpu_pgd = 0;
 	return 0;
 }
 
@@ -607,7 +609,7 @@ void page_table_guest_data_init(struct lguest *lg)
 	/* We tell the Guest that it can't use the top 4MB of virtual
 	 * addresses used by the Switcher. */
 	    || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
-	    || put_user(lg->pgdirs[lg->pgdidx].gpgdir,&lg->lguest_data->pgdir))
+	    || put_user(lg->pgdirs[0].gpgdir, &lg->lguest_data->pgdir))
 		kill_guest(lg, "bad guest page %p", lg->lguest_data);
 
 	/* In flush_user_mappings() we loop from 0 to
@@ -637,7 +639,6 @@ void free_guest_pagetable(struct lguest *lg)
  * Guest is about to run on this CPU. */
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
-	struct lguest *lg = cpu->lg;
 	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
 	pgd_t switcher_pgd;
 	pte_t regs_pte;
@@ -647,7 +648,7 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 	 * page for this CPU (with appropriate flags). */
 	switcher_pgd = __pgd(__pa(switcher_pte_page) | _PAGE_KERNEL);
 
-	lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
+	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
 
 	/* We also change the Switcher PTE page.  When we're running the Guest,
 	 * we want the Guest's "regs" page to appear where the first Switcher
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 65f2e3809475..8c723555ffb3 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -145,7 +145,7 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
 		 * 0-th argument above, ie "a").  %ebx contains the
 		 * physical address of the Guest's top-level page
 		 * directory. */
-		: "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
+		: "0"(pages), "1"(__pa(lg->pgdirs[cpu->cpu_pgd].pgdir))
 		/* We tell gcc that all these registers could change,
 		 * which means we don't have to save and restore them in
 		 * the Switcher. */
@@ -223,7 +223,7 @@ static int emulate_insn(struct lg_cpu *cpu)
 	unsigned int insnlen = 0, in = 0, shift = 0;
 	/* The eip contains the *virtual* address of the Guest's instruction:
 	 * guest_pa just subtracts the Guest's page_offset. */
-	unsigned long physaddr = guest_pa(lg, cpu->regs->eip);
+	unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
 
 	/* This must be the Guest kernel trying to do something, not userspace!
 	 * The bottom two bits of the CS segment register are the privilege
@@ -305,7 +305,8 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
 		 *
 		 * The errcode tells whether this was a read or a write, and
 		 * whether kernel or userspace code. */
-		if (demand_page(lg,cpu->arch.last_pagefault,cpu->regs->errcode))
+		if (demand_page(cpu, cpu->arch.last_pagefault,
+				cpu->regs->errcode))
 			return;
 
 		/* OK, it's really not there (or not OK): the Guest needs to