author		Glauber de Oliveira Costa <gcosta@redhat.com>	2008-01-07 08:05:33 -0500
committer	Rusty Russell <rusty@rustcorp.com.au>		2008-01-30 06:50:11 -0500
commit		fc708b3e407dfd2e12ba9a6cf35bd0bffad1796d (patch)
tree		e9a6df9c9b8cf4077c98198c3f5d3bc6dc991c0f /drivers
parent		a53a35a8b485b9c16b73e5177bddaa4321971199 (diff)
lguest: replace lguest_arch with lg_cpu_arch.
The fields found in lguest_arch are not really per-guest, but per-cpu
(gdt, idt, etc). So this patch turns lguest_arch into lg_cpu_arch.

It makes sense to have a per-guest per-arch struct, but this can be
addressed later, when the need arrives.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
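In outline, the patch moves the arch-specific state one level down, from the
per-guest structure into the per-virtual-CPU structure. A minimal layout
sketch follows; the member list (gdt, idt, last_pagefault) is taken from the
hunks below, but the array sizes and any omitted fields are assumptions, not
the exact kernel definitions:

	/* Sketch only: GDT_ENTRIES/IDT_ENTRIES sizes and omitted members are assumed. */
	struct lg_cpu_arch {
		struct desc_struct gdt[GDT_ENTRIES];	/* per-vCPU GDT copy */
		struct desc_struct idt[IDT_ENTRIES];	/* per-vCPU IDT copy */
		unsigned long last_pagefault;		/* cr2 of the last Guest page fault */
	};

	struct lg_cpu {
		struct lguest *lg;			/* back-pointer to the owning guest */
		/* ... */
		DECLARE_BITMAP(irqs_pending, LGUEST_IRQS);

		struct lg_cpu_arch arch;	/* was "struct lguest_arch arch" in struct lguest */
	};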
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/lguest/interrupts_and_traps.c	28
-rw-r--r--	drivers/lguest/lg.h			19
-rw-r--r--	drivers/lguest/segments.c		42
-rw-r--r--	drivers/lguest/x86/core.c		24
4 files changed, 57 insertions(+), 56 deletions(-)
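The per-file changes follow a single pattern: functions that used to take a
struct lguest * and touch lg->arch now take a struct lg_cpu * and touch
cpu->arch, keeping a local "struct lguest *lg = cpu->lg" only where
guest-wide state (lg->changed, kill_guest(), __lgread()) is still needed.
A condensed sketch of the pattern, adapted from the load_guest_gdt() hunk
below rather than quoted verbatim:

	/* Before: all state hangs off the guest. */
	void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
	{
		if (num > ARRAY_SIZE(lg->arch.gdt))
			kill_guest(lg, "too many gdt entries %i", num);

		__lgread(lg, lg->arch.gdt, table, num * sizeof(lg->arch.gdt[0]));
		fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->arch.gdt));
		lg->changed |= CHANGED_GDT;
	}

	/* After: the GDT lives in the vCPU; the guest pointer is kept only for
	 * guest-wide operations (reading Guest memory, killing the Guest,
	 * marking it changed). */
	void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num)
	{
		struct lguest *lg = cpu->lg;

		if (num > ARRAY_SIZE(cpu->arch.gdt))
			kill_guest(lg, "too many gdt entries %i", num);

		__lgread(lg, cpu->arch.gdt, table, num * sizeof(cpu->arch.gdt[0]));
		fixup_gdt_table(cpu, 0, ARRAY_SIZE(cpu->arch.gdt));
		lg->changed |= CHANGED_GDT;
	}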
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 468faf8233d6..306b93c71dcc 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -178,7 +178,7 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
 	/* Look at the IDT entry the Guest gave us for this interrupt. The
 	 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
 	 * over them. */
-	idt = &lg->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
+	idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
 	/* If they don't have a handler (yet?), we just ignore it */
 	if (idt_present(idt->a, idt->b)) {
 		/* OK, mark it no longer pending and deliver it. */
@@ -251,15 +251,15 @@ int deliver_trap(struct lg_cpu *cpu, unsigned int num)
 {
 	/* Trap numbers are always 8 bit, but we set an impossible trap number
 	 * for traps inside the Switcher, so check that here. */
-	if (num >= ARRAY_SIZE(cpu->lg->arch.idt))
+	if (num >= ARRAY_SIZE(cpu->arch.idt))
 		return 0;
 
 	/* Early on the Guest hasn't set the IDT entries (or maybe it put a
 	 * bogus one in): if we fail here, the Guest will be killed. */
-	if (!idt_present(cpu->lg->arch.idt[num].a, cpu->lg->arch.idt[num].b))
+	if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
 		return 0;
-	set_guest_interrupt(cpu, cpu->lg->arch.idt[num].a,
-			    cpu->lg->arch.idt[num].b, has_err(num));
+	set_guest_interrupt(cpu, cpu->arch.idt[num].a,
+			    cpu->arch.idt[num].b, has_err(num));
 	return 1;
 }
 
@@ -385,7 +385,7 @@ static void set_trap(struct lguest *lg, struct desc_struct *trap,
  *
  * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the
  * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */
-void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
+void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
 {
 	/* Guest never handles: NMI, doublefault, spurious interrupt or
 	 * hypercall. We ignore when it tries to set them. */
@@ -394,13 +394,13 @@ void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
 
 	/* Mark the IDT as changed: next time the Guest runs we'll know we have
 	 * to copy this again. */
-	lg->changed |= CHANGED_IDT;
+	cpu->lg->changed |= CHANGED_IDT;
 
 	/* Check that the Guest doesn't try to step outside the bounds. */
-	if (num >= ARRAY_SIZE(lg->arch.idt))
-		kill_guest(lg, "Setting idt entry %u", num);
+	if (num >= ARRAY_SIZE(cpu->arch.idt))
+		kill_guest(cpu->lg, "Setting idt entry %u", num);
 	else
-		set_trap(lg, &lg->arch.idt[num], num, lo, hi);
+		set_trap(cpu->lg, &cpu->arch.idt[num], num, lo, hi);
 }
 
 /* The default entry for each interrupt points into the Switcher routines which
@@ -436,14 +436,14 @@ void setup_default_idt_entries(struct lguest_ro_state *state,
 /*H:240 We don't use the IDT entries in the "struct lguest" directly, instead
  * we copy them into the IDT which we've set up for Guests on this CPU, just
  * before we run the Guest. This routine does that copy. */
-void copy_traps(const struct lguest *lg, struct desc_struct *idt,
+void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
 		const unsigned long *def)
 {
 	unsigned int i;
 
 	/* We can simply copy the direct traps, otherwise we use the default
 	 * ones in the Switcher: they will return to the Host. */
-	for (i = 0; i < ARRAY_SIZE(lg->arch.idt); i++) {
+	for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) {
 		/* If no Guest can ever override this trap, leave it alone. */
 		if (!direct_trap(i))
 			continue;
@@ -452,8 +452,8 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt,
 		 * Interrupt gates (type 14) disable interrupts as they are
 		 * entered, which we never let the Guest do. Not present
 		 * entries (type 0x0) also can't go direct, of course. */
-		if (idt_type(lg->arch.idt[i].a, lg->arch.idt[i].b) == 0xF)
-			idt[i] = lg->arch.idt[i];
+		if (idt_type(cpu->arch.idt[i].a, cpu->arch.idt[i].b) == 0xF)
+			idt[i] = cpu->arch.idt[i];
 		else
 			/* Reset it to the default. */
 			default_idt_entry(&idt[i], i, def[i]);
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 35b331230c55..d08b85342b92 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -57,6 +57,8 @@ struct lg_cpu {
 
 	/* Pending virtual interrupts */
 	DECLARE_BITMAP(irqs_pending, LGUEST_IRQS);
+
+	struct lg_cpu_arch arch;
 };
 
 /* The private info the thread maintains about the guest. */
@@ -99,8 +101,6 @@ struct lguest
 
 	/* Dead? */
 	const char *dead;
-
-	struct lguest_arch arch;
 };
 
 extern struct mutex lguest_lock;
@@ -139,12 +139,13 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
 /* interrupts_and_traps.c: */
 void maybe_do_interrupt(struct lg_cpu *cpu);
 int deliver_trap(struct lg_cpu *cpu, unsigned int num);
-void load_guest_idt_entry(struct lguest *lg, unsigned int i, u32 low, u32 hi);
+void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
+			  u32 low, u32 hi);
 void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages);
 void pin_stack_pages(struct lguest *lg);
 void setup_default_idt_entries(struct lguest_ro_state *state,
 			       const unsigned long *def);
-void copy_traps(const struct lguest *lg, struct desc_struct *idt,
+void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
 		const unsigned long *def);
 void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
 void init_clockdev(struct lg_cpu *cpu);
@@ -154,11 +155,11 @@ void free_interrupts(void);
 
 /* segments.c: */
 void setup_default_gdt_entries(struct lguest_ro_state *state);
-void setup_guest_gdt(struct lguest *lg);
-void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num);
-void guest_load_tls(struct lguest *lg, unsigned long tls_array);
-void copy_gdt(const struct lguest *lg, struct desc_struct *gdt);
-void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt);
+void setup_guest_gdt(struct lg_cpu *cpu);
+void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num);
+void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array);
+void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt);
+void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
 
 /* page_tables.c: */
 int init_guest_pagetable(struct lguest *lg, unsigned long pgtable);
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 9e189cbec7dd..02138450ecf5 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -58,7 +58,7 @@ static int ignored_gdt(unsigned int num)
  * Protection Fault in the Switcher when it restores a Guest segment register
  * which tries to use that entry. Then we kill the Guest for causing such a
  * mess: the message will be "unhandled trap 256". */
-static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
+static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
 {
 	unsigned int i;
 
@@ -71,14 +71,14 @@ static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
 		/* Segment descriptors contain a privilege level: the Guest is
 		 * sometimes careless and leaves this as 0, even though it's
 		 * running at privilege level 1. If so, we fix it here. */
-		if ((lg->arch.gdt[i].b & 0x00006000) == 0)
-			lg->arch.gdt[i].b |= (GUEST_PL << 13);
+		if ((cpu->arch.gdt[i].b & 0x00006000) == 0)
+			cpu->arch.gdt[i].b |= (GUEST_PL << 13);
 
 		/* Each descriptor has an "accessed" bit. If we don't set it
 		 * now, the CPU will try to set it when the Guest first loads
 		 * that entry into a segment register. But the GDT isn't
 		 * writable by the Guest, so bad things can happen. */
-		lg->arch.gdt[i].b |= 0x00000100;
+		cpu->arch.gdt[i].b |= 0x00000100;
 	}
 }
 
@@ -109,31 +109,31 @@ void setup_default_gdt_entries(struct lguest_ro_state *state)
 
 /* This routine sets up the initial Guest GDT for booting. All entries start
  * as 0 (unusable). */
-void setup_guest_gdt(struct lguest *lg)
+void setup_guest_gdt(struct lg_cpu *cpu)
 {
 	/* Start with full 0-4G segments... */
-	lg->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
-	lg->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
 	/* ...except the Guest is allowed to use them, so set the privilege
 	 * level appropriately in the flags. */
-	lg->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
-	lg->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
 }
 
 /*H:650 An optimization of copy_gdt(), for just the three "thead-local storage"
  * entries. */
-void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
+void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
 {
 	unsigned int i;
 
 	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
-		gdt[i] = lg->arch.gdt[i];
+		gdt[i] = cpu->arch.gdt[i];
 }
 
 /*H:640 When the Guest is run on a different CPU, or the GDT entries have
  * changed, copy_gdt() is called to copy the Guest's GDT entries across to this
  * CPU's GDT. */
-void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
+void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
 {
 	unsigned int i;
 
@@ -141,21 +141,22 @@ void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
 	 * replaced. See ignored_gdt() above. */
 	for (i = 0; i < GDT_ENTRIES; i++)
 		if (!ignored_gdt(i))
-			gdt[i] = lg->arch.gdt[i];
+			gdt[i] = cpu->arch.gdt[i];
 }
 
 /*H:620 This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT).
  * We copy it from the Guest and tweak the entries. */
-void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
+void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num)
 {
+	struct lguest *lg = cpu->lg;
 	/* We assume the Guest has the same number of GDT entries as the
 	 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
-	if (num > ARRAY_SIZE(lg->arch.gdt))
+	if (num > ARRAY_SIZE(cpu->arch.gdt))
 		kill_guest(lg, "too many gdt entries %i", num);
 
 	/* We read the whole thing in, then fix it up. */
-	__lgread(lg, lg->arch.gdt, table, num * sizeof(lg->arch.gdt[0]));
-	fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->arch.gdt));
+	__lgread(lg, cpu->arch.gdt, table, num * sizeof(cpu->arch.gdt[0]));
+	fixup_gdt_table(cpu, 0, ARRAY_SIZE(cpu->arch.gdt));
 	/* Mark that the GDT changed so the core knows it has to copy it again,
 	 * even if the Guest is run on the same CPU. */
 	lg->changed |= CHANGED_GDT;
@@ -165,12 +166,13 @@ void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
  * Remember that this happens on every context switch, so it's worth
  * optimizing. But wouldn't it be neater to have a single hypercall to cover
  * both cases? */
-void guest_load_tls(struct lguest *lg, unsigned long gtls)
+void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
 {
-	struct desc_struct *tls = &lg->arch.gdt[GDT_ENTRY_TLS_MIN];
+	struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN];
+	struct lguest *lg = cpu->lg;
 
 	__lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
-	fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
+	fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
 	/* Note that just the TLS entries have changed. */
 	lg->changed |= CHANGED_GDT_TLS;
 }
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index d96a93d95aea..e989b8358864 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -100,14 +100,14 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
 
 	/* Copy direct-to-Guest trap entries. */
 	if (lg->changed & CHANGED_IDT)
-		copy_traps(lg, pages->state.guest_idt, default_idt_entries);
+		copy_traps(cpu, pages->state.guest_idt, default_idt_entries);
 
 	/* Copy all GDT entries which the Guest can change. */
 	if (lg->changed & CHANGED_GDT)
-		copy_gdt(lg, pages->state.guest_gdt);
+		copy_gdt(cpu, pages->state.guest_gdt);
 	/* If only the TLS entries have changed, copy them. */
 	else if (lg->changed & CHANGED_GDT_TLS)
-		copy_gdt_tls(lg, pages->state.guest_gdt);
+		copy_gdt_tls(cpu, pages->state.guest_gdt);
 
 	/* Mark the Guest as unchanged for next time. */
 	lg->changed = 0;
@@ -196,7 +196,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	 * re-enable interrupts an interrupt could fault and thus overwrite
 	 * cr2, or we could even move off to a different CPU. */
 	if (cpu->regs->trapnum == 14)
-		lg->arch.last_pagefault = read_cr2();
+		cpu->arch.last_pagefault = read_cr2();
 	/* Similarly, if we took a trap because the Guest used the FPU,
 	 * we have to restore the FPU it expects to see. */
 	else if (cpu->regs->trapnum == 7)
@@ -307,7 +307,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
 		 *
 		 * The errcode tells whether this was a read or a write, and
 		 * whether kernel or userspace code. */
-		if (demand_page(lg, lg->arch.last_pagefault, cpu->regs->errcode))
+		if (demand_page(lg,cpu->arch.last_pagefault,cpu->regs->errcode))
 			return;
 
 		/* OK, it's really not there (or not OK): the Guest needs to
@@ -318,7 +318,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
 		 * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
 		 * lg->lguest_data could be NULL */
 		if (lg->lguest_data &&
-		    put_user(lg->arch.last_pagefault, &lg->lguest_data->cr2))
+		    put_user(cpu->arch.last_pagefault, &lg->lguest_data->cr2))
 			kill_guest(lg, "Writing cr2");
 		break;
 	case 7: /* We've intercepted a Device Not Available fault. */
@@ -349,7 +349,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
 	 * it handle), it dies with a cryptic error message. */
 	kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
 		   cpu->regs->trapnum, cpu->regs->eip,
-		   cpu->regs->trapnum == 14 ? lg->arch.last_pagefault
+		   cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
 		   : cpu->regs->errcode);
 }
 
@@ -495,17 +495,15 @@ void __exit lguest_arch_host_fini(void)
 /*H:122 The i386-specific hypercalls simply farm out to the right functions. */
 int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 {
-	struct lguest *lg = cpu->lg;
-
 	switch (args->arg0) {
 	case LHCALL_LOAD_GDT:
-		load_guest_gdt(lg, args->arg1, args->arg2);
+		load_guest_gdt(cpu, args->arg1, args->arg2);
 		break;
 	case LHCALL_LOAD_IDT_ENTRY:
-		load_guest_idt_entry(lg, args->arg1, args->arg2, args->arg3);
+		load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3);
 		break;
 	case LHCALL_LOAD_TLS:
-		guest_load_tls(lg, args->arg1);
+		guest_load_tls(cpu, args->arg1);
 		break;
 	default:
 		/* Bad Guest. Bad! */
@@ -586,5 +584,5 @@ void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
 
 	/* There are a couple of GDT entries the Guest expects when first
 	 * booting. */
-	setup_guest_gdt(cpu->lg);
+	setup_guest_gdt(cpu);
 }