path: root/drivers/lguest/interrupts_and_traps.c
Diffstat (limited to 'drivers/lguest/interrupts_and_traps.c')
-rw-r--r--  drivers/lguest/interrupts_and_traps.c  |  149
1 file changed, 75 insertions(+), 74 deletions(-)
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 2b66f79c208b..32e97c1858e5 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -41,11 +41,11 @@ static int idt_present(u32 lo, u32 hi)
 
 /* We need a helper to "push" a value onto the Guest's stack, since that's a
  * big part of what delivering an interrupt does. */
-static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
+static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
 {
         /* Stack grows upwards: move stack then write value. */
         *gstack -= 4;
-        lgwrite(lg, *gstack, u32, val);
+        lgwrite(cpu, *gstack, u32, val);
 }
 
 /*H:210 The set_guest_interrupt() routine actually delivers the interrupt or
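
Everything below leans on three small helpers that pull fields out of the two 32-bit halves of an x86 gate descriptor: idt_present(), idt_type() and idt_address(). Their bodies sit outside this hunk, so the following is only a sketch reconstructed from the x86 descriptor layout, not quoted from the patch:

    /* Sketch (not from this patch): field extraction from an 8-byte IDT gate
     * descriptor, where "lo" is the low dword and "hi" the high dword. */
    static int idt_present(u32 lo, u32 hi)
    {
            return hi & 0x8000;             /* P (present) bit: bit 15 of hi */
    }

    static int idt_type(u32 lo, u32 hi)
    {
            return (hi >> 8) & 0xF;         /* gate type: 0xE interrupt, 0xF trap */
    }

    static unsigned long idt_address(u32 lo, u32 hi)
    {
            /* The handler offset is split: low 16 bits in lo, high 16 in hi. */
            return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
    }
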
@@ -60,7 +60,7 @@ static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
  * We set up the stack just like the CPU does for a real interrupt, so it's
  * identical for the Guest (and the standard "iret" instruction will undo
  * it). */
-static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
+static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
 {
         unsigned long gstack, origstack;
         u32 eflags, ss, irq_enable;
@@ -69,59 +69,59 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
         /* There are two cases for interrupts: one where the Guest is already
          * in the kernel, and a more complex one where the Guest is in
          * userspace.  We check the privilege level to find out. */
-        if ((lg->regs->ss&0x3) != GUEST_PL) {
+        if ((cpu->regs->ss&0x3) != GUEST_PL) {
                 /* The Guest told us their kernel stack with the SET_STACK
                  * hypercall: both the virtual address and the segment */
-                virtstack = lg->esp1;
-                ss = lg->ss1;
+                virtstack = cpu->esp1;
+                ss = cpu->ss1;
 
-                origstack = gstack = guest_pa(lg, virtstack);
+                origstack = gstack = guest_pa(cpu, virtstack);
                 /* We push the old stack segment and pointer onto the new
                  * stack: when the Guest does an "iret" back from the interrupt
                  * handler the CPU will notice they're dropping privilege
                  * levels and expect these here. */
-                push_guest_stack(lg, &gstack, lg->regs->ss);
-                push_guest_stack(lg, &gstack, lg->regs->esp);
+                push_guest_stack(cpu, &gstack, cpu->regs->ss);
+                push_guest_stack(cpu, &gstack, cpu->regs->esp);
         } else {
                 /* We're staying on the same Guest (kernel) stack. */
-                virtstack = lg->regs->esp;
-                ss = lg->regs->ss;
+                virtstack = cpu->regs->esp;
+                ss = cpu->regs->ss;
 
-                origstack = gstack = guest_pa(lg, virtstack);
+                origstack = gstack = guest_pa(cpu, virtstack);
         }
 
         /* Remember that we never let the Guest actually disable interrupts, so
          * the "Interrupt Flag" bit is always set.  We copy that bit from the
          * Guest's "irq_enabled" field into the eflags word: we saw the Guest
          * copy it back in "lguest_iret". */
-        eflags = lg->regs->eflags;
-        if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0
+        eflags = cpu->regs->eflags;
+        if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
             && !(irq_enable & X86_EFLAGS_IF))
                 eflags &= ~X86_EFLAGS_IF;
 
         /* An interrupt is expected to push three things on the stack: the old
          * "eflags" word, the old code segment, and the old instruction
          * pointer. */
-        push_guest_stack(lg, &gstack, eflags);
-        push_guest_stack(lg, &gstack, lg->regs->cs);
-        push_guest_stack(lg, &gstack, lg->regs->eip);
+        push_guest_stack(cpu, &gstack, eflags);
+        push_guest_stack(cpu, &gstack, cpu->regs->cs);
+        push_guest_stack(cpu, &gstack, cpu->regs->eip);
 
         /* For the six traps which supply an error code, we push that, too. */
         if (has_err)
-                push_guest_stack(lg, &gstack, lg->regs->errcode);
+                push_guest_stack(cpu, &gstack, cpu->regs->errcode);
 
         /* Now we've pushed all the old state, we change the stack, the code
          * segment and the address to execute. */
-        lg->regs->ss = ss;
-        lg->regs->esp = virtstack + (gstack - origstack);
-        lg->regs->cs = (__KERNEL_CS|GUEST_PL);
-        lg->regs->eip = idt_address(lo, hi);
+        cpu->regs->ss = ss;
+        cpu->regs->esp = virtstack + (gstack - origstack);
+        cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
+        cpu->regs->eip = idt_address(lo, hi);
 
         /* There are two kinds of interrupt handlers: 0xE is an "interrupt
          * gate" which expects interrupts to be disabled on entry. */
         if (idt_type(lo, hi) == 0xE)
-                if (put_user(0, &lg->lguest_data->irq_enabled))
-                        kill_guest(lg, "Disabling interrupts");
+                if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
+                        kill_guest(cpu, "Disabling interrupts");
 }
 
 /*H:205
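
Putting the pushes above together: after set_guest_interrupt() returns, the Guest's kernel stack holds a normal x86 interrupt frame and the saved registers point at the handler. Summarized from the code in this hunk:

    /* Guest kernel stack after delivery (highest address first):
     *
     *     old SS, old ESP    - only when coming from Guest userspace
     *     EFLAGS             - with IF reflecting lguest_data->irq_enabled
     *     old CS
     *     old EIP
     *     error code         - only for the traps where has_err() is true
     *
     * and the register set is redirected to the handler:
     *
     *     cpu->regs->ss  = Guest kernel stack segment
     *     cpu->regs->esp = virtstack + (gstack - origstack)
     *     cpu->regs->cs  = __KERNEL_CS | GUEST_PL
     *     cpu->regs->eip = idt_address(lo, hi)
     */
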
@@ -129,23 +129,23 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
  *
  * maybe_do_interrupt() gets called before every entry to the Guest, to see if
  * we should divert the Guest to running an interrupt handler. */
-void maybe_do_interrupt(struct lguest *lg)
+void maybe_do_interrupt(struct lg_cpu *cpu)
 {
         unsigned int irq;
         DECLARE_BITMAP(blk, LGUEST_IRQS);
         struct desc_struct *idt;
 
         /* If the Guest hasn't even initialized yet, we can do nothing. */
-        if (!lg->lguest_data)
+        if (!cpu->lg->lguest_data)
                 return;
 
         /* Take our "irqs_pending" array and remove any interrupts the Guest
          * wants blocked: the result ends up in "blk". */
-        if (copy_from_user(&blk, lg->lguest_data->blocked_interrupts,
+        if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
                            sizeof(blk)))
                 return;
 
-        bitmap_andnot(blk, lg->irqs_pending, blk, LGUEST_IRQS);
+        bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);
 
         /* Find the first interrupt. */
         irq = find_first_bit(blk, LGUEST_IRQS);
@@ -155,19 +155,20 @@ void maybe_do_interrupt(struct lguest *lg)
 
         /* They may be in the middle of an iret, where they asked us never to
          * deliver interrupts. */
-        if (lg->regs->eip >= lg->noirq_start && lg->regs->eip < lg->noirq_end)
+        if (cpu->regs->eip >= cpu->lg->noirq_start &&
+            (cpu->regs->eip < cpu->lg->noirq_end))
                 return;
 
         /* If they're halted, interrupts restart them. */
-        if (lg->halted) {
+        if (cpu->halted) {
                 /* Re-enable interrupts. */
-                if (put_user(X86_EFLAGS_IF, &lg->lguest_data->irq_enabled))
-                        kill_guest(lg, "Re-enabling interrupts");
-                lg->halted = 0;
+                if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
+                        kill_guest(cpu, "Re-enabling interrupts");
+                cpu->halted = 0;
         } else {
                 /* Otherwise we check if they have interrupts disabled. */
                 u32 irq_enabled;
-                if (get_user(irq_enabled, &lg->lguest_data->irq_enabled))
+                if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
                         irq_enabled = 0;
                 if (!irq_enabled)
                         return;
@@ -176,15 +177,15 @@ void maybe_do_interrupt(struct lguest *lg)
         /* Look at the IDT entry the Guest gave us for this interrupt.  The
          * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
          * over them. */
-        idt = &lg->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
+        idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
         /* If they don't have a handler (yet?), we just ignore it */
         if (idt_present(idt->a, idt->b)) {
                 /* OK, mark it no longer pending and deliver it. */
-                clear_bit(irq, lg->irqs_pending);
+                clear_bit(irq, cpu->irqs_pending);
                 /* set_guest_interrupt() takes the interrupt descriptor and a
                  * flag to say whether this interrupt pushes an error code onto
                  * the stack as well: virtual interrupts never do. */
-                set_guest_interrupt(lg, idt->a, idt->b, 0);
+                set_guest_interrupt(cpu, idt->a, idt->b, 0);
         }
 
         /* Every time we deliver an interrupt, we update the timestamp in the
@@ -192,7 +193,7 @@ void maybe_do_interrupt(struct lguest *lg)
          * did this more often, but it can actually be quite slow: doing it
          * here is a compromise which means at least it gets updated every
          * timer interrupt. */
-        write_timestamp(lg);
+        write_timestamp(cpu);
 }
 /*:*/
 
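
The heart of maybe_do_interrupt() is just "pending & ~blocked, take the lowest set bit". A self-contained, userspace-flavoured sketch of that selection (a single 32-bit word instead of a DECLARE_BITMAP() array; the helper name is made up):

    #include <stdint.h>

    /* Returns the lowest deliverable IRQ number, or 32 if there is none. */
    static unsigned int first_deliverable_irq(uint32_t pending, uint32_t blocked)
    {
            uint32_t deliverable = pending & ~blocked;      /* bitmap_andnot() */

            if (!deliverable)
                    return 32;                              /* nothing pending */
            return (unsigned int)__builtin_ctz(deliverable); /* find_first_bit() */
    }
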
@@ -245,19 +246,19 @@ static int has_err(unsigned int trap)
 }
 
 /* deliver_trap() returns true if it could deliver the trap. */
-int deliver_trap(struct lguest *lg, unsigned int num)
+int deliver_trap(struct lg_cpu *cpu, unsigned int num)
 {
         /* Trap numbers are always 8 bit, but we set an impossible trap number
          * for traps inside the Switcher, so check that here. */
-        if (num >= ARRAY_SIZE(lg->arch.idt))
+        if (num >= ARRAY_SIZE(cpu->arch.idt))
                 return 0;
 
         /* Early on the Guest hasn't set the IDT entries (or maybe it put a
          * bogus one in): if we fail here, the Guest will be killed. */
-        if (!idt_present(lg->arch.idt[num].a, lg->arch.idt[num].b))
+        if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
                 return 0;
-        set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b,
-                            has_err(num));
+        set_guest_interrupt(cpu, cpu->arch.idt[num].a,
+                            cpu->arch.idt[num].b, has_err(num));
         return 1;
 }
 
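
The "if we fail here, the Guest will be killed" comment refers to deliver_trap()'s caller, which lives outside this file. That call site is roughly of this shape (a hypothetical sketch, not quoted from the patch):

    /* Hypothetical shape of the call site in the hypervisor's run loop. */
    if (!deliver_trap(cpu, cpu->regs->trapnum))
            kill_guest(cpu, "unhandled trap %li", cpu->regs->trapnum);
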
@@ -309,18 +310,18 @@ static int direct_trap(unsigned int num)
  * the Guest.
  *
  * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. */
-void pin_stack_pages(struct lguest *lg)
+void pin_stack_pages(struct lg_cpu *cpu)
 {
         unsigned int i;
 
         /* Depending on the CONFIG_4KSTACKS option, the Guest can have one or
          * two pages of stack space. */
-        for (i = 0; i < lg->stack_pages; i++)
+        for (i = 0; i < cpu->lg->stack_pages; i++)
                 /* The stack grows *upwards*, so the address we're given is the
                  * start of the page after the kernel stack.  Subtract one to
                  * get back onto the first stack page, and keep subtracting to
                  * get to the rest of the stack pages. */
-                pin_page(lg, lg->esp1 - 1 - i * PAGE_SIZE);
+                pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
 }
 
 /* Direct traps also mean that we need to know whenever the Guest wants to use
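
Since esp1 points just past the top of the stack, the loop above pins the page containing esp1 - 1 and then works downwards, one page per iteration. For example (illustrative addresses):

    /* esp1 = 0xc0402000, PAGE_SIZE = 4096, stack_pages = 2:
     *   i = 0: pin_page(cpu, 0xc0401fff)   -> page 0xc0401000..0xc0401fff
     *   i = 1: pin_page(cpu, 0xc0400fff)   -> page 0xc0400000..0xc0400fff
     */
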
@@ -331,21 +332,21 @@ void pin_stack_pages(struct lguest *lg)
  *
  * In Linux each process has its own kernel stack, so this happens a lot: we
  * change stacks on each context switch. */
-void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
+void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
 {
         /* You are not allowed have a stack segment with privilege level 0: bad
          * Guest! */
         if ((seg & 0x3) != GUEST_PL)
-                kill_guest(lg, "bad stack segment %i", seg);
+                kill_guest(cpu, "bad stack segment %i", seg);
         /* We only expect one or two stack pages. */
         if (pages > 2)
-                kill_guest(lg, "bad stack pages %u", pages);
+                kill_guest(cpu, "bad stack pages %u", pages);
         /* Save where the stack is, and how many pages */
-        lg->ss1 = seg;
-        lg->esp1 = esp;
-        lg->stack_pages = pages;
+        cpu->ss1 = seg;
+        cpu->esp1 = esp;
+        cpu->lg->stack_pages = pages;
         /* Make sure the new stack pages are mapped */
-        pin_stack_pages(lg);
+        pin_stack_pages(cpu);
 }
 
 /* All this reference to mapping stacks leads us neatly into the other complex
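
The (seg & 0x3) test above works because the bottom two bits of an x86 segment selector are its privilege level (the rest is the table indicator and descriptor index), and the Guest kernel runs at a fixed non-zero ring. A minimal sketch, assuming GUEST_PL is 1 as in lguest:

    #include <stdint.h>

    #define SKETCH_GUEST_PL 1   /* assumption: lguest runs the Guest kernel in ring 1 */

    /* Selector layout: bits 0-1 RPL, bit 2 table indicator, bits 3-15 index. */
    static int selector_is_guest_kernel(uint32_t seg)
    {
            return (seg & 0x3) == SKETCH_GUEST_PL;
    }
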
@@ -353,7 +354,7 @@ void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
 
 /*H:235 This is the routine which actually checks the Guest's IDT entry and
  * transfers it into the entry in "struct lguest": */
-static void set_trap(struct lguest *lg, struct desc_struct *trap,
+static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
                      unsigned int num, u32 lo, u32 hi)
 {
         u8 type = idt_type(lo, hi);
@@ -366,7 +367,7 @@ static void set_trap(struct lguest *lg, struct desc_struct *trap,
 
         /* We only support interrupt and trap gates. */
         if (type != 0xE && type != 0xF)
-                kill_guest(lg, "bad IDT type %i", type);
+                kill_guest(cpu, "bad IDT type %i", type);
 
         /* We only copy the handler address, present bit, privilege level and
          * type.  The privilege level controls where the trap can be triggered
@@ -383,7 +384,7 @@ static void set_trap(struct lguest *lg, struct desc_struct *trap,
  *
  * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the
  * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */
-void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
+void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
 {
         /* Guest never handles: NMI, doublefault, spurious interrupt or
          * hypercall.  We ignore when it tries to set them. */
@@ -392,13 +393,13 @@ void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
 
         /* Mark the IDT as changed: next time the Guest runs we'll know we have
          * to copy this again. */
-        lg->changed |= CHANGED_IDT;
+        cpu->changed |= CHANGED_IDT;
 
         /* Check that the Guest doesn't try to step outside the bounds. */
-        if (num >= ARRAY_SIZE(lg->arch.idt))
-                kill_guest(lg, "Setting idt entry %u", num);
+        if (num >= ARRAY_SIZE(cpu->arch.idt))
+                kill_guest(cpu, "Setting idt entry %u", num);
         else
-                set_trap(lg, &lg->arch.idt[num], num, lo, hi);
+                set_trap(cpu, &cpu->arch.idt[num], num, lo, hi);
 }
 
 /* The default entry for each interrupt points into the Switcher routines which
@@ -434,14 +435,14 @@ void setup_default_idt_entries(struct lguest_ro_state *state,
 /*H:240 We don't use the IDT entries in the "struct lguest" directly, instead
  * we copy them into the IDT which we've set up for Guests on this CPU, just
  * before we run the Guest.  This routine does that copy. */
-void copy_traps(const struct lguest *lg, struct desc_struct *idt,
+void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
                 const unsigned long *def)
 {
         unsigned int i;
 
         /* We can simply copy the direct traps, otherwise we use the default
          * ones in the Switcher: they will return to the Host. */
-        for (i = 0; i < ARRAY_SIZE(lg->arch.idt); i++) {
+        for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) {
                 /* If no Guest can ever override this trap, leave it alone. */
                 if (!direct_trap(i))
                         continue;
@@ -450,8 +451,8 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt,
                  * Interrupt gates (type 14) disable interrupts as they are
                  * entered, which we never let the Guest do.  Not present
                  * entries (type 0x0) also can't go direct, of course. */
-                if (idt_type(lg->arch.idt[i].a, lg->arch.idt[i].b) == 0xF)
-                        idt[i] = lg->arch.idt[i];
+                if (idt_type(cpu->arch.idt[i].a, cpu->arch.idt[i].b) == 0xF)
+                        idt[i] = cpu->arch.idt[i];
                 else
                         /* Reset it to the default. */
                         default_idt_entry(&idt[i], i, def[i]);
@@ -470,13 +471,13 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt,
  * infrastructure to set a callback at that time.
  *
  * 0 means "turn off the clock". */
-void guest_set_clockevent(struct lguest *lg, unsigned long delta)
+void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta)
 {
         ktime_t expires;
 
         if (unlikely(delta == 0)) {
                 /* Clock event device is shutting down. */
-                hrtimer_cancel(&lg->hrt);
+                hrtimer_cancel(&cpu->hrt);
                 return;
         }
 
@@ -484,25 +485,25 @@ void guest_set_clockevent(struct lguest *lg, unsigned long delta)
          * all the time between now and the timer interrupt it asked for.  This
          * is almost always the right thing to do. */
         expires = ktime_add_ns(ktime_get_real(), delta);
-        hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS);
+        hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS);
 }
 
 /* This is the function called when the Guest's timer expires. */
 static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
 {
-        struct lguest *lg = container_of(timer, struct lguest, hrt);
+        struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);
 
         /* Remember the first interrupt is the timer interrupt. */
-        set_bit(0, lg->irqs_pending);
+        set_bit(0, cpu->irqs_pending);
         /* If the Guest is actually stopped, we need to wake it up. */
-        if (lg->halted)
-                wake_up_process(lg->tsk);
+        if (cpu->halted)
+                wake_up_process(cpu->tsk);
         return HRTIMER_NORESTART;
 }
 
 /* This sets up the timer for this Guest. */
-void init_clockdev(struct lguest *lg)
+void init_clockdev(struct lg_cpu *cpu)
 {
-        hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-        lg->hrt.function = clockdev_fn;
+        hrtimer_init(&cpu->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+        cpu->hrt.function = clockdev_fn;
 }
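
Read together, the timer path is: init_clockdev() wires the per-cpu hrtimer to clockdev_fn(); guest_set_clockevent() arms it for a one-shot timeout; when it fires, clockdev_fn() marks IRQ 0 pending and wakes a halted Guest; the next maybe_do_interrupt() before Guest entry actually delivers it. A condensed, illustrative walk-through (not part of the patch, error handling omitted):

    /* Illustrative only: the lifecycle of the Guest clock, condensed from the
     * functions above. */
    void sketch_clock_lifecycle(struct lg_cpu *cpu, unsigned long delta_ns)
    {
            init_clockdev(cpu);                  /* once, when the vcpu is set up */

            guest_set_clockevent(cpu, delta_ns); /* Guest asks for a timeout */

            /* ...later, in hrtimer context, clockdev_fn() runs:
             *     set_bit(0, cpu->irqs_pending);
             *     if (cpu->halted)
             *             wake_up_process(cpu->tsk);
             * and the next maybe_do_interrupt(cpu) pushes the timer interrupt
             * onto the Guest's stack via set_guest_interrupt(). */
    }
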