author    Glauber de Oliveira Costa <gcosta@redhat.com>    2008-01-17 16:19:42 -0500
committer Rusty Russell <rusty@rustcorp.com.au>            2008-01-30 06:50:18 -0500
commit    382ac6b3fbc0ea6a5697fc6caaf7e7de12fa8b96 (patch)
tree      bdda012251f29775b2e1201f3b2b3e38c4680f42
parent    934faab464c6a26ed1a226b6cf7111b35405dde1 (diff)
lguest: get rid of lg variable assignments
We can save some lines of code by getting rid of the "struct lguest *lg = cpu->lg" assignments spread everywhere by now.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--  drivers/lguest/core.c                  24
-rw-r--r--  drivers/lguest/hypercalls.c            49
-rw-r--r--  drivers/lguest/interrupts_and_traps.c  54
-rw-r--r--  drivers/lguest/lg.h                    28
-rw-r--r--  drivers/lguest/page_tables.c          115
-rw-r--r--  drivers/lguest/segments.c               8
-rw-r--r--  drivers/lguest/x86/core.c              30
7 files changed, 149 insertions(+), 159 deletions(-)
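A minimal before/after sketch of the conversion this patch applies everywhere (the struct stand-ins and do_something() below are illustrative, not the real lguest definitions):

/* Stand-ins for the real lguest types, only to keep the sketch
 * self-contained. */
struct lguest { const char *dead; };
struct lg_cpu { struct lguest *lg; };

static void do_something(struct lguest *lg) { (void)lg; }

/* Before: each routine opened with the assignment this patch deletes. */
static void example_before(struct lg_cpu *cpu)
{
        struct lguest *lg = cpu->lg;

        if (!lg->dead)
                do_something(lg);
}

/* After: the owning guest is reached directly through cpu->lg. */
static void example_after(struct lg_cpu *cpu)
{
        if (!cpu->lg->dead)
                do_something(cpu->lg);
}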
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 6023872e32d0..7743d73768df 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -151,23 +151,23 @@ int lguest_address_ok(const struct lguest *lg,
 /* This routine copies memory from the Guest. Here we can see how useful the
  * kill_lguest() routine we met in the Launcher can be: we return a random
  * value (all zeroes) instead of needing to return an error. */
-void __lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
+void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes)
 {
-        if (!lguest_address_ok(lg, addr, bytes)
-            || copy_from_user(b, lg->mem_base + addr, bytes) != 0) {
+        if (!lguest_address_ok(cpu->lg, addr, bytes)
+            || copy_from_user(b, cpu->lg->mem_base + addr, bytes) != 0) {
                 /* copy_from_user should do this, but as we rely on it... */
                 memset(b, 0, bytes);
-                kill_guest(lg, "bad read address %#lx len %u", addr, bytes);
+                kill_guest(cpu, "bad read address %#lx len %u", addr, bytes);
         }
 }
 
 /* This is the write (copy into guest) version. */
-void __lgwrite(struct lguest *lg, unsigned long addr, const void *b,
+void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
                unsigned bytes)
 {
-        if (!lguest_address_ok(lg, addr, bytes)
-            || copy_to_user(lg->mem_base + addr, b, bytes) != 0)
-                kill_guest(lg, "bad write address %#lx len %u", addr, bytes);
+        if (!lguest_address_ok(cpu->lg, addr, bytes)
+            || copy_to_user(cpu->lg->mem_base + addr, b, bytes) != 0)
+                kill_guest(cpu, "bad write address %#lx len %u", addr, bytes);
 }
 /*:*/
 
@@ -176,10 +176,8 @@ void __lgwrite(struct lguest *lg, unsigned long addr, const void *b,
  * going around and around until something interesting happens. */
 int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 {
-        struct lguest *lg = cpu->lg;
-
         /* We stop running once the Guest is dead. */
-        while (!lg->dead) {
+        while (!cpu->lg->dead) {
                 /* First we run any hypercalls the Guest wants done. */
                 if (cpu->hcall)
                         do_hypercalls(cpu);
@@ -212,7 +210,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 
                 /* Just make absolutely sure the Guest is still alive. One of
                  * those hypercalls could have been fatal, for example. */
-                if (lg->dead)
+                if (cpu->lg->dead)
                         break;
 
                 /* If the Guest asked to be stopped, we sleep. The Guest's
@@ -237,7 +235,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
                         lguest_arch_handle_trap(cpu);
         }
 
-        if (lg->dead == ERR_PTR(-ERESTART))
+        if (cpu->lg->dead == ERR_PTR(-ERESTART))
                 return -ERESTART;
         /* The Guest is dead => "No such file or directory" */
         return -ENOENT;
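One contract worth keeping in mind while reading the converted call sites: as the comment in the first hunk says, a failed __lgread() zero-fills the destination and marks the Guest dead rather than returning an error. A minimal sketch of what that means for a caller (guest_addr is invented for the example; lgread() is the typed wrapper shown in the lg.h hunk further down):

/* Sketch only: a bad guest_addr yields 0 here and sets cpu->lg->dead,
 * which run_guest() notices on its next loop iteration, so the caller
 * needs no explicit error path. */
static u32 example_read_u32(struct lg_cpu *cpu, unsigned long guest_addr)
{
        return lgread(cpu, guest_addr, u32);    /* 0 if the read failed */
}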
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 0471018d700d..32666d0d956a 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -31,8 +31,6 @@
  * Or gets killed. Or, in the case of LHCALL_CRASH, both. */
 static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 {
-        struct lguest *lg = cpu->lg;
-
         switch (args->arg0) {
         case LHCALL_FLUSH_ASYNC:
                 /* This call does nothing, except by breaking out of the Guest
@@ -41,7 +39,7 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
         case LHCALL_LGUEST_INIT:
                 /* You can't get here unless you're already initialized. Don't
                  * do that. */
-                kill_guest(lg, "already have lguest_data");
+                kill_guest(cpu, "already have lguest_data");
                 break;
         case LHCALL_SHUTDOWN: {
                 /* Shutdown is such a trivial hypercall that we do it in four
@@ -49,11 +47,11 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
                 char msg[128];
                 /* If the lgread fails, it will call kill_guest() itself; the
                  * kill_guest() with the message will be ignored. */
-                __lgread(lg, msg, args->arg1, sizeof(msg));
+                __lgread(cpu, msg, args->arg1, sizeof(msg));
                 msg[sizeof(msg)-1] = '\0';
-                kill_guest(lg, "CRASH: %s", msg);
+                kill_guest(cpu, "CRASH: %s", msg);
                 if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
-                        lg->dead = ERR_PTR(-ERESTART);
+                        cpu->lg->dead = ERR_PTR(-ERESTART);
                 break;
         }
         case LHCALL_FLUSH_TLB:
@@ -74,10 +72,10 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
                 guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
                 break;
         case LHCALL_SET_PTE:
-                guest_set_pte(lg, args->arg1, args->arg2, __pte(args->arg3));
+                guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
                 break;
         case LHCALL_SET_PMD:
-                guest_set_pmd(lg, args->arg1, args->arg2);
+                guest_set_pmd(cpu->lg, args->arg1, args->arg2);
                 break;
         case LHCALL_SET_CLOCKEVENT:
                 guest_set_clockevent(cpu, args->arg1);
@@ -96,7 +94,7 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
         default:
                 /* It should be an architecture-specific hypercall. */
                 if (lguest_arch_do_hcall(cpu, args))
-                        kill_guest(lg, "Bad hypercall %li\n", args->arg0);
+                        kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
         }
 }
 /*:*/
@@ -112,10 +110,9 @@ static void do_async_hcalls(struct lg_cpu *cpu)
 {
         unsigned int i;
         u8 st[LHCALL_RING_SIZE];
-        struct lguest *lg = cpu->lg;
 
         /* For simplicity, we copy the entire call status array in at once. */
-        if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
+        if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st)))
                 return;
 
         /* We process "struct lguest_data"s hcalls[] ring once. */
@@ -137,9 +134,9 @@ static void do_async_hcalls(struct lg_cpu *cpu)
 
                 /* Copy the hypercall arguments into a local copy of
                  * the hcall_args struct. */
-                if (copy_from_user(&args, &lg->lguest_data->hcalls[n],
+                if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n],
                                    sizeof(struct hcall_args))) {
-                        kill_guest(lg, "Fetching async hypercalls");
+                        kill_guest(cpu, "Fetching async hypercalls");
                         break;
                 }
 
@@ -147,8 +144,8 @@ static void do_async_hcalls(struct lg_cpu *cpu)
                 do_hcall(cpu, &args);
 
                 /* Mark the hypercall done. */
-                if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
-                        kill_guest(lg, "Writing result for async hypercall");
+                if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) {
+                        kill_guest(cpu, "Writing result for async hypercall");
                         break;
                 }
 
@@ -163,29 +160,28 @@ static void do_async_hcalls(struct lg_cpu *cpu)
  * Guest makes a hypercall, we end up here to set things up: */
 static void initialize(struct lg_cpu *cpu)
 {
-        struct lguest *lg = cpu->lg;
         /* You can't do anything until you're initialized. The Guest knows the
          * rules, so we're unforgiving here. */
         if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) {
-                kill_guest(lg, "hypercall %li before INIT", cpu->hcall->arg0);
+                kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0);
                 return;
         }
 
         if (lguest_arch_init_hypercalls(cpu))
-                kill_guest(lg, "bad guest page %p", lg->lguest_data);
+                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
 
         /* The Guest tells us where we're not to deliver interrupts by putting
          * the range of addresses into "struct lguest_data". */
-        if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start)
-            || get_user(lg->noirq_end, &lg->lguest_data->noirq_end))
-                kill_guest(lg, "bad guest page %p", lg->lguest_data);
+        if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
+            || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
+                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
 
         /* We write the current time into the Guest's data page once so it can
          * set its clock. */
-        write_timestamp(lg);
+        write_timestamp(cpu);
 
         /* page_tables.c will also do some setup. */
-        page_table_guest_data_init(lg);
+        page_table_guest_data_init(cpu);
 
         /* This is the one case where the above accesses might have been the
          * first write to a Guest page. This may have caused a copy-on-write
@@ -237,10 +233,11 @@ void do_hypercalls(struct lg_cpu *cpu)
 
 /* This routine supplies the Guest with time: it's used for wallclock time at
  * initial boot and as a rough time source if the TSC isn't available. */
-void write_timestamp(struct lguest *lg)
+void write_timestamp(struct lg_cpu *cpu)
 {
         struct timespec now;
         ktime_get_real_ts(&now);
-        if (copy_to_user(&lg->lguest_data->time, &now, sizeof(struct timespec)))
-                kill_guest(lg, "Writing timestamp");
+        if (copy_to_user(&cpu->lg->lguest_data->time,
+                         &now, sizeof(struct timespec)))
+                kill_guest(cpu, "Writing timestamp");
 }
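Taken together, the LHCALL_SHUTDOWN arm above is a compact tour of the new convention: __lgread() and kill_guest() take the vcpu, while fields of the owning guest are reached through cpu->lg. A standalone sketch of that sequence (the function and argument names are invented; the buffer size matches the hunk):

/* Sketch of the post-patch LHCALL_SHUTDOWN path from do_hcall() above. */
static void example_shutdown(struct lg_cpu *cpu, unsigned long msg_addr,
                             unsigned long reason)
{
        char msg[128];

        __lgread(cpu, msg, msg_addr, sizeof(msg)); /* zero-fills on error */
        msg[sizeof(msg) - 1] = '\0';
        kill_guest(cpu, "CRASH: %s", msg);
        if (reason == LGUEST_SHUTDOWN_RESTART)
                cpu->lg->dead = ERR_PTR(-ERESTART);
}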
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 9ac7455ec7fb..32e97c1858e5 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -41,11 +41,11 @@ static int idt_present(u32 lo, u32 hi)
 
 /* We need a helper to "push" a value onto the Guest's stack, since that's a
  * big part of what delivering an interrupt does. */
-static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
+static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
 {
         /* Stack grows upwards: move stack then write value. */
         *gstack -= 4;
-        lgwrite(lg, *gstack, u32, val);
+        lgwrite(cpu, *gstack, u32, val);
 }
 
 /*H:210 The set_guest_interrupt() routine actually delivers the interrupt or
@@ -65,7 +65,6 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
         unsigned long gstack, origstack;
         u32 eflags, ss, irq_enable;
         unsigned long virtstack;
-        struct lguest *lg = cpu->lg;
 
         /* There are two cases for interrupts: one where the Guest is already
          * in the kernel, and a more complex one where the Guest is in
@@ -81,8 +80,8 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
                  * stack: when the Guest does an "iret" back from the interrupt
                  * handler the CPU will notice they're dropping privilege
                  * levels and expect these here. */
-                push_guest_stack(lg, &gstack, cpu->regs->ss);
-                push_guest_stack(lg, &gstack, cpu->regs->esp);
+                push_guest_stack(cpu, &gstack, cpu->regs->ss);
+                push_guest_stack(cpu, &gstack, cpu->regs->esp);
         } else {
                 /* We're staying on the same Guest (kernel) stack. */
                 virtstack = cpu->regs->esp;
@@ -96,20 +95,20 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
          * Guest's "irq_enabled" field into the eflags word: we saw the Guest
          * copy it back in "lguest_iret". */
         eflags = cpu->regs->eflags;
-        if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0
+        if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
             && !(irq_enable & X86_EFLAGS_IF))
                 eflags &= ~X86_EFLAGS_IF;
 
         /* An interrupt is expected to push three things on the stack: the old
          * "eflags" word, the old code segment, and the old instruction
          * pointer. */
-        push_guest_stack(lg, &gstack, eflags);
-        push_guest_stack(lg, &gstack, cpu->regs->cs);
-        push_guest_stack(lg, &gstack, cpu->regs->eip);
+        push_guest_stack(cpu, &gstack, eflags);
+        push_guest_stack(cpu, &gstack, cpu->regs->cs);
+        push_guest_stack(cpu, &gstack, cpu->regs->eip);
 
         /* For the six traps which supply an error code, we push that, too. */
         if (has_err)
-                push_guest_stack(lg, &gstack, cpu->regs->errcode);
+                push_guest_stack(cpu, &gstack, cpu->regs->errcode);
 
         /* Now we've pushed all the old state, we change the stack, the code
          * segment and the address to execute. */
@@ -121,8 +120,8 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
         /* There are two kinds of interrupt handlers: 0xE is an "interrupt
          * gate" which expects interrupts to be disabled on entry. */
         if (idt_type(lo, hi) == 0xE)
-                if (put_user(0, &lg->lguest_data->irq_enabled))
-                        kill_guest(lg, "Disabling interrupts");
+                if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
+                        kill_guest(cpu, "Disabling interrupts");
 }
 
 /*H:205
@@ -133,17 +132,16 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
 void maybe_do_interrupt(struct lg_cpu *cpu)
 {
         unsigned int irq;
-        struct lguest *lg = cpu->lg;
         DECLARE_BITMAP(blk, LGUEST_IRQS);
         struct desc_struct *idt;
 
         /* If the Guest hasn't even initialized yet, we can do nothing. */
-        if (!lg->lguest_data)
+        if (!cpu->lg->lguest_data)
                 return;
 
         /* Take our "irqs_pending" array and remove any interrupts the Guest
          * wants blocked: the result ends up in "blk". */
-        if (copy_from_user(&blk, lg->lguest_data->blocked_interrupts,
+        if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
                            sizeof(blk)))
                 return;
 
@@ -157,19 +155,20 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
 
         /* They may be in the middle of an iret, where they asked us never to
          * deliver interrupts. */
-        if (cpu->regs->eip >= lg->noirq_start && cpu->regs->eip < lg->noirq_end)
+        if (cpu->regs->eip >= cpu->lg->noirq_start &&
+            (cpu->regs->eip < cpu->lg->noirq_end))
                 return;
 
         /* If they're halted, interrupts restart them. */
         if (cpu->halted) {
                 /* Re-enable interrupts. */
-                if (put_user(X86_EFLAGS_IF, &lg->lguest_data->irq_enabled))
-                        kill_guest(lg, "Re-enabling interrupts");
+                if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
+                        kill_guest(cpu, "Re-enabling interrupts");
                 cpu->halted = 0;
         } else {
                 /* Otherwise we check if they have interrupts disabled. */
                 u32 irq_enabled;
-                if (get_user(irq_enabled, &lg->lguest_data->irq_enabled))
+                if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
                         irq_enabled = 0;
                 if (!irq_enabled)
                         return;
@@ -194,7 +193,7 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
          * did this more often, but it can actually be quite slow: doing it
          * here is a compromise which means at least it gets updated every
          * timer interrupt. */
-        write_timestamp(lg);
+        write_timestamp(cpu);
 }
 /*:*/
 
@@ -315,10 +314,9 @@ void pin_stack_pages(struct lg_cpu *cpu)
 {
         unsigned int i;
 
-        struct lguest *lg = cpu->lg;
         /* Depending on the CONFIG_4KSTACKS option, the Guest can have one or
          * two pages of stack space. */
-        for (i = 0; i < lg->stack_pages; i++)
+        for (i = 0; i < cpu->lg->stack_pages; i++)
                 /* The stack grows *upwards*, so the address we're given is the
                  * start of the page after the kernel stack. Subtract one to
                  * get back onto the first stack page, and keep subtracting to
@@ -339,10 +337,10 @@ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
         /* You are not allowed have a stack segment with privilege level 0: bad
          * Guest! */
         if ((seg & 0x3) != GUEST_PL)
-                kill_guest(cpu->lg, "bad stack segment %i", seg);
+                kill_guest(cpu, "bad stack segment %i", seg);
         /* We only expect one or two stack pages. */
         if (pages > 2)
-                kill_guest(cpu->lg, "bad stack pages %u", pages);
+                kill_guest(cpu, "bad stack pages %u", pages);
         /* Save where the stack is, and how many pages */
         cpu->ss1 = seg;
         cpu->esp1 = esp;
@@ -356,7 +354,7 @@ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
 
 /*H:235 This is the routine which actually checks the Guest's IDT entry and
  * transfers it into the entry in "struct lguest": */
-static void set_trap(struct lguest *lg, struct desc_struct *trap,
+static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
                      unsigned int num, u32 lo, u32 hi)
 {
         u8 type = idt_type(lo, hi);
@@ -369,7 +367,7 @@ static void set_trap(struct lguest *lg, struct desc_struct *trap,
 
         /* We only support interrupt and trap gates. */
         if (type != 0xE && type != 0xF)
-                kill_guest(lg, "bad IDT type %i", type);
+                kill_guest(cpu, "bad IDT type %i", type);
 
         /* We only copy the handler address, present bit, privilege level and
          * type. The privilege level controls where the trap can be triggered
@@ -399,9 +397,9 @@ void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
 
         /* Check that the Guest doesn't try to step outside the bounds. */
         if (num >= ARRAY_SIZE(cpu->arch.idt))
-                kill_guest(cpu->lg, "Setting idt entry %u", num);
+                kill_guest(cpu, "Setting idt entry %u", num);
         else
-                set_trap(cpu->lg, &cpu->arch.idt[num], num, lo, hi);
+                set_trap(cpu, &cpu->arch.idt[num], num, lo, hi);
 }
 
 /* The default entry for each interrupt points into the Switcher routines which
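For orientation, the push_guest_stack() conversions above all serve one job: building the Guest's trap frame. A sketch of the push order from set_guest_interrupt(), under the same assumptions as the hunks (locals and the ring-change test are simplified; this is not the verbatim function):

/* Push order for the Guest fault frame, per the hunks above. */
static void example_push_frame(struct lg_cpu *cpu, unsigned long gstack,
                               u32 eflags, int has_err)
{
        push_guest_stack(cpu, &gstack, cpu->regs->ss);  /* ring change only */
        push_guest_stack(cpu, &gstack, cpu->regs->esp); /* ring change only */
        push_guest_stack(cpu, &gstack, eflags);
        push_guest_stack(cpu, &gstack, cpu->regs->cs);
        push_guest_stack(cpu, &gstack, cpu->regs->eip);
        if (has_err)
                push_guest_stack(cpu, &gstack, cpu->regs->errcode);
}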
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 0d6f6435d72c..b75ce3b17afe 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -111,22 +111,22 @@ extern struct mutex lguest_lock;
 /* core.c: */
 int lguest_address_ok(const struct lguest *lg,
                       unsigned long addr, unsigned long len);
-void __lgread(struct lguest *, void *, unsigned long, unsigned);
-void __lgwrite(struct lguest *, unsigned long, const void *, unsigned);
+void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
+void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
 
 /*H:035 Using memory-copy operations like that is usually inconvient, so we
  * have the following helper macros which read and write a specific type (often
  * an unsigned long).
  *
  * This reads into a variable of the given type then returns that. */
-#define lgread(lg, addr, type)						\
-	({ type _v; __lgread((lg), &_v, (addr), sizeof(_v)); _v; })
+#define lgread(cpu, addr, type)						\
+	({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; })
 
 /* This checks that the variable is of the given type, then writes it out. */
-#define lgwrite(lg, addr, type, val)					\
+#define lgwrite(cpu, addr, type, val)					\
 	do {								\
 		typecheck(type, val);					\
-		__lgwrite((lg), (addr), &(val), sizeof(val));		\
+		__lgwrite((cpu), (addr), &(val), sizeof(val));		\
 	} while(0)
 /* (end of memory access helper routines) :*/
 
@@ -171,13 +171,13 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
 void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
 void guest_pagetable_clear_all(struct lg_cpu *cpu);
 void guest_pagetable_flush_user(struct lg_cpu *cpu);
-void guest_set_pte(struct lguest *lg, unsigned long gpgdir,
+void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
                    unsigned long vaddr, pte_t val);
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
 int demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode);
 void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
-void page_table_guest_data_init(struct lguest *lg);
+void page_table_guest_data_init(struct lg_cpu *cpu);
 
 /* <arch>/core.c: */
 void lguest_arch_host_init(void);
@@ -197,7 +197,7 @@ void lguest_device_remove(void);
 
 /* hypercalls.c: */
 void do_hypercalls(struct lg_cpu *cpu);
-void write_timestamp(struct lguest *lg);
+void write_timestamp(struct lg_cpu *cpu);
 
 /*L:035
  * Let's step aside for the moment, to study one important routine that's used
@@ -223,12 +223,12 @@ void write_timestamp(struct lguest *lg);
  * Like any macro which uses an "if", it is safely wrapped in a run-once "do {
  * } while(0)".
  */
-#define kill_guest(lg, fmt...)					\
+#define kill_guest(cpu, fmt...)					\
 do {								\
-	if (!(lg)->dead) {					\
-		(lg)->dead = kasprintf(GFP_ATOMIC, fmt);	\
-		if (!(lg)->dead)				\
-			(lg)->dead = ERR_PTR(-ENOMEM);		\
+	if (!(cpu)->lg->dead) {					\
+		(cpu)->lg->dead = kasprintf(GFP_ATOMIC, fmt);	\
+		if (!(cpu)->lg->dead)				\
+			(cpu)->lg->dead = ERR_PTR(-ENOMEM);	\
 	}							\
 } while(0)
 /* (End of aside) :*/
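As a usage note on the retargeted helpers, this is how a call site reads and writes Guest memory once the macros take the vcpu (addr and the zero check are invented for the example; assumes the declarations above are in scope):

static void example_guest_access(struct lg_cpu *cpu, unsigned long addr)
{
        /* lgread() declares a temporary, calls __lgread(cpu, ...) into it,
         * and returns the value; a bad address yields 0 and a dead Guest. */
        u32 val = lgread(cpu, addr, u32);

        /* lgwrite() typechecks val before calling __lgwrite(cpu, ...). */
        lgwrite(cpu, addr, u32, val);

        /* kill_guest() now reaches the death flag via (cpu)->lg->dead. */
        if (val == 0)
                kill_guest(cpu, "example: zero at %#lx", addr);
}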
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index c9acafcab2aa..983e9020cef8 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -68,17 +68,17 @@ static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
  * page directory entry (PGD) for that address. Since we keep track of several
  * page tables, the "i" argument tells us which one we're interested in (it's
  * usually the current one). */
-static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
+static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
 {
         unsigned int index = pgd_index(vaddr);
 
         /* We kill any Guest trying to touch the Switcher addresses. */
         if (index >= SWITCHER_PGD_INDEX) {
-                kill_guest(lg, "attempt to access switcher pages");
+                kill_guest(cpu, "attempt to access switcher pages");
                 index = 0;
         }
         /* Return a pointer index'th pgd entry for the i'th page table. */
-        return &lg->pgdirs[i].pgdir[index];
+        return &cpu->lg->pgdirs[i].pgdir[index];
 }
 
 /* This routine then takes the page directory entry returned above, which
@@ -137,7 +137,7 @@ static unsigned long get_pfn(unsigned long virtpfn, int write)
  * entry can be a little tricky. The flags are (almost) the same, but the
  * Guest PTE contains a virtual page number: the CPU needs the real page
  * number. */
-static pte_t gpte_to_spte(struct lguest *lg, pte_t gpte, int write)
+static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
 {
         unsigned long pfn, base, flags;
 
@@ -148,7 +148,7 @@ static pte_t gpte_to_spte(struct lguest *lg, pte_t gpte, int write)
         flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
 
         /* The Guest's pages are offset inside the Launcher. */
-        base = (unsigned long)lg->mem_base / PAGE_SIZE;
+        base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
 
         /* We need a temporary "unsigned long" variable to hold the answer from
          * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
@@ -156,7 +156,7 @@ static pte_t gpte_to_spte(struct lguest *lg, pte_t gpte, int write)
          * page, given the virtual number. */
         pfn = get_pfn(base + pte_pfn(gpte), write);
         if (pfn == -1UL) {
-                kill_guest(lg, "failed to get page %lu", pte_pfn(gpte));
+                kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
                 /* When we destroy the Guest, we'll go through the shadow page
                  * tables and release_pte() them. Make sure we don't think
                  * this one is valid! */
@@ -176,17 +176,18 @@ static void release_pte(pte_t pte)
 }
 /*:*/
 
-static void check_gpte(struct lguest *lg, pte_t gpte)
+static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
 {
         if ((pte_flags(gpte) & (_PAGE_PWT|_PAGE_PSE))
-            || pte_pfn(gpte) >= lg->pfn_limit)
-                kill_guest(lg, "bad page table entry");
+            || pte_pfn(gpte) >= cpu->lg->pfn_limit)
+                kill_guest(cpu, "bad page table entry");
 }
 
-static void check_gpgd(struct lguest *lg, pgd_t gpgd)
+static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
 {
-        if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || pgd_pfn(gpgd) >= lg->pfn_limit)
-                kill_guest(lg, "bad page directory entry");
+        if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
+            (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
+                kill_guest(cpu, "bad page directory entry");
 }
 
 /*H:330
@@ -206,27 +207,26 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
         unsigned long gpte_ptr;
         pte_t gpte;
         pte_t *spte;
-        struct lguest *lg = cpu->lg;
 
         /* First step: get the top-level Guest page table entry. */
-        gpgd = lgread(lg, gpgd_addr(cpu, vaddr), pgd_t);
+        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
         /* Toplevel not present? We can't map it in. */
         if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                 return 0;
 
         /* Now look at the matching shadow entry. */
-        spgd = spgd_addr(lg, cpu->cpu_pgd, vaddr);
+        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
         if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
                 /* No shadow entry: allocate a new shadow PTE page. */
                 unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
                 /* This is not really the Guest's fault, but killing it is
                  * simple for this corner case. */
                 if (!ptepage) {
-                        kill_guest(lg, "out of memory allocating pte page");
+                        kill_guest(cpu, "out of memory allocating pte page");
                         return 0;
                 }
                 /* We check that the Guest pgd is OK. */
-                check_gpgd(lg, gpgd);
+                check_gpgd(cpu, gpgd);
                 /* And we copy the flags to the shadow PGD entry. The page
                  * number in the shadow PGD is the page we just allocated. */
                 *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
@@ -235,7 +235,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
         /* OK, now we look at the lower level in the Guest page table: keep its
          * address, because we might update it later. */
         gpte_ptr = gpte_addr(gpgd, vaddr);
-        gpte = lgread(lg, gpte_ptr, pte_t);
+        gpte = lgread(cpu, gpte_ptr, pte_t);
 
         /* If this page isn't in the Guest page tables, we can't page it in. */
         if (!(pte_flags(gpte) & _PAGE_PRESENT))
@@ -252,7 +252,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 
         /* Check that the Guest PTE flags are OK, and the page number is below
          * the pfn_limit (ie. not mapping the Launcher binary). */
-        check_gpte(lg, gpte);
+        check_gpte(cpu, gpte);
 
         /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
         gpte = pte_mkyoung(gpte);
@@ -268,17 +268,17 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
         /* If this is a write, we insist that the Guest page is writable (the
          * final arg to gpte_to_spte()). */
         if (pte_dirty(gpte))
-                *spte = gpte_to_spte(lg, gpte, 1);
+                *spte = gpte_to_spte(cpu, gpte, 1);
         else
                 /* If this is a read, don't set the "writable" bit in the page
                  * table entry, even if the Guest says it's writable. That way
                  * we will come back here when a write does actually occur, so
                  * we can update the Guest's _PAGE_DIRTY flag. */
-                *spte = gpte_to_spte(lg, pte_wrprotect(gpte), 0);
+                *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);
 
         /* Finally, we write the Guest PTE entry back: we've set the
          * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
-        lgwrite(lg, gpte_ptr, pte_t, gpte);
+        lgwrite(cpu, gpte_ptr, pte_t, gpte);
 
         /* The fault is fixed, the page table is populated, the mapping
          * manipulated, the result returned and the code complete. A small
@@ -303,7 +303,7 @@ static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
         unsigned long flags;
 
         /* Look at the current top level entry: is it present? */
-        spgd = spgd_addr(cpu->lg, cpu->cpu_pgd, vaddr);
+        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
         if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
                 return 0;
 
@@ -320,7 +320,7 @@ static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 {
         if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
-                kill_guest(cpu->lg, "bad stack page %#lx", vaddr);
+                kill_guest(cpu, "bad stack page %#lx", vaddr);
 }
 
 /*H:450 If we chase down the release_pgd() code, it looks like this: */
@@ -372,14 +372,14 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
         pte_t gpte;
 
         /* First step: get the top-level Guest page table entry. */
-        gpgd = lgread(cpu->lg, gpgd_addr(cpu, vaddr), pgd_t);
+        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
         /* Toplevel not present? We can't map it in. */
         if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
-                kill_guest(cpu->lg, "Bad address %#lx", vaddr);
+                kill_guest(cpu, "Bad address %#lx", vaddr);
 
-        gpte = lgread(cpu->lg, gpte_addr(gpgd, vaddr), pte_t);
+        gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
         if (!(pte_flags(gpte) & _PAGE_PRESENT))
-                kill_guest(cpu->lg, "Bad address %#lx", vaddr);
+                kill_guest(cpu, "Bad address %#lx", vaddr);
 
         return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
 }
@@ -404,16 +404,16 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
                                int *blank_pgdir)
 {
         unsigned int next;
-        struct lguest *lg = cpu->lg;
 
         /* We pick one entry at random to throw out. Choosing the Least
          * Recently Used might be better, but this is easy. */
-        next = random32() % ARRAY_SIZE(lg->pgdirs);
+        next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
         /* If it's never been allocated at all before, try now. */
-        if (!lg->pgdirs[next].pgdir) {
-                lg->pgdirs[next].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+        if (!cpu->lg->pgdirs[next].pgdir) {
+                cpu->lg->pgdirs[next].pgdir =
+                                (pgd_t *)get_zeroed_page(GFP_KERNEL);
                 /* If the allocation fails, just keep using the one we have */
-                if (!lg->pgdirs[next].pgdir)
+                if (!cpu->lg->pgdirs[next].pgdir)
                         next = cpu->cpu_pgd;
                 else
                         /* This is a blank page, so there are no kernel
@@ -421,9 +421,9 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
                         *blank_pgdir = 1;
         }
         /* Record which Guest toplevel this shadows. */
-        lg->pgdirs[next].gpgdir = gpgdir;
+        cpu->lg->pgdirs[next].gpgdir = gpgdir;
         /* Release all the non-kernel mappings. */
-        flush_user_mappings(lg, next);
+        flush_user_mappings(cpu->lg, next);
 
         return next;
 }
@@ -436,13 +436,12 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
 {
         int newpgdir, repin = 0;
-        struct lguest *lg = cpu->lg;
 
         /* Look to see if we have this one already. */
-        newpgdir = find_pgdir(lg, pgtable);
+        newpgdir = find_pgdir(cpu->lg, pgtable);
         /* If not, we allocate or mug an existing one: if it's a fresh one,
          * repin gets set to 1. */
-        if (newpgdir == ARRAY_SIZE(lg->pgdirs))
+        if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
                 newpgdir = new_pgdir(cpu, pgtable, &repin);
         /* Change the current pgd index to the new one. */
         cpu->cpu_pgd = newpgdir;
@@ -499,11 +498,11 @@ void guest_pagetable_clear_all(struct lg_cpu *cpu)
  * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
  * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
  */
-static void do_set_pte(struct lguest *lg, int idx,
+static void do_set_pte(struct lg_cpu *cpu, int idx,
                        unsigned long vaddr, pte_t gpte)
 {
         /* Look up the matching shadow page directory entry. */
-        pgd_t *spgd = spgd_addr(lg, idx, vaddr);
+        pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
 
         /* If the top level isn't present, there's no entry to update. */
         if (pgd_flags(*spgd) & _PAGE_PRESENT) {
@@ -515,8 +514,8 @@ static void do_set_pte(struct lguest *lg, int idx,
                  * as well put that entry they've given us in now. This shaves
                  * 10% off a copy-on-write micro-benchmark. */
                 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
-                        check_gpte(lg, gpte);
-                        *spte = gpte_to_spte(lg, gpte,
+                        check_gpte(cpu, gpte);
+                        *spte = gpte_to_spte(cpu, gpte,
                                              pte_flags(gpte) & _PAGE_DIRTY);
                 } else
                         /* Otherwise kill it and we can demand_page() it in
@@ -535,22 +534,22 @@ static void do_set_pte(struct lguest *lg, int idx,
  *
  * The benefit is that when we have to track a new page table, we can copy keep
  * all the kernel mappings. This speeds up context switch immensely. */
-void guest_set_pte(struct lguest *lg,
+void guest_set_pte(struct lg_cpu *cpu,
                    unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
 {
         /* Kernel mappings must be changed on all top levels. Slow, but
          * doesn't happen often. */
-        if (vaddr >= lg->kernel_address) {
+        if (vaddr >= cpu->lg->kernel_address) {
                 unsigned int i;
-                for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
-                        if (lg->pgdirs[i].pgdir)
-                                do_set_pte(lg, i, vaddr, gpte);
+                for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
+                        if (cpu->lg->pgdirs[i].pgdir)
+                                do_set_pte(cpu, i, vaddr, gpte);
         } else {
                 /* Is this page table one we have a shadow for? */
-                int pgdir = find_pgdir(lg, gpgdir);
-                if (pgdir != ARRAY_SIZE(lg->pgdirs))
+                int pgdir = find_pgdir(cpu->lg, gpgdir);
+                if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
                         /* If so, do the update. */
-                        do_set_pte(lg, pgdir, vaddr, gpte);
+                        do_set_pte(cpu, pgdir, vaddr, gpte);
         }
 }
 
@@ -601,21 +600,23 @@ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
 }
 
 /* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
-void page_table_guest_data_init(struct lguest *lg)
+void page_table_guest_data_init(struct lg_cpu *cpu)
 {
         /* We get the kernel address: above this is all kernel memory. */
-        if (get_user(lg->kernel_address, &lg->lguest_data->kernel_address)
+        if (get_user(cpu->lg->kernel_address,
+                     &cpu->lg->lguest_data->kernel_address)
             /* We tell the Guest that it can't use the top 4MB of virtual
              * addresses used by the Switcher. */
-            || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
-            || put_user(lg->pgdirs[0].gpgdir, &lg->lguest_data->pgdir))
-                kill_guest(lg, "bad guest page %p", lg->lguest_data);
+            || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
+            || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
+                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
 
         /* In flush_user_mappings() we loop from 0 to
          * "pgd_index(lg->kernel_address)". This assumes it won't hit the
          * Switcher mappings, so check that now. */
-        if (pgd_index(lg->kernel_address) >= SWITCHER_PGD_INDEX)
-                kill_guest(lg, "bad kernel address %#lx", lg->kernel_address);
+        if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
+                kill_guest(cpu, "bad kernel address %#lx",
+                           cpu->lg->kernel_address);
 }
 
 /* When a Guest dies, our cleanup is fairly simple. */
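A pattern worth noting in this file: helpers that may need to kill or read the Guest now take the vcpu, while pure shadow bookkeeping such as find_pgdir() and flush_user_mappings() keeps taking "struct lguest *". A sketch of the resulting split at one call site (the error message is made up for the example):

/* Sketch only: bookkeeping goes through cpu->lg, fault paths through cpu. */
static void example_lookup(struct lg_cpu *cpu, unsigned long gpgdir)
{
        int pgdir = find_pgdir(cpu->lg, gpgdir);        /* bookkeeping */

        if (pgdir == ARRAY_SIZE(cpu->lg->pgdirs))       /* no shadow yet */
                kill_guest(cpu, "no shadow for %#lx", gpgdir);
}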
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 635f54c719ae..ec6aa3f1c36b 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -148,14 +148,13 @@ void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
  * We copy it from the Guest and tweak the entries. */
 void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num)
 {
-        struct lguest *lg = cpu->lg;
         /* We assume the Guest has the same number of GDT entries as the
          * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
         if (num > ARRAY_SIZE(cpu->arch.gdt))
-                kill_guest(lg, "too many gdt entries %i", num);
+                kill_guest(cpu, "too many gdt entries %i", num);
 
         /* We read the whole thing in, then fix it up. */
-        __lgread(lg, cpu->arch.gdt, table, num * sizeof(cpu->arch.gdt[0]));
+        __lgread(cpu, cpu->arch.gdt, table, num * sizeof(cpu->arch.gdt[0]));
         fixup_gdt_table(cpu, 0, ARRAY_SIZE(cpu->arch.gdt));
         /* Mark that the GDT changed so the core knows it has to copy it again,
          * even if the Guest is run on the same CPU. */
@@ -169,9 +168,8 @@ void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num)
 void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
 {
         struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN];
-        struct lguest *lg = cpu->lg;
 
-        __lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
+        __lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
         fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
         /* Note that just the TLS entries have changed. */
         cpu->changed |= CHANGED_GDT_TLS;
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index fd6a8512443c..e9c3ba8aa1ec 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -117,7 +117,6 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
         /* This is a dummy value we need for GCC's sake. */
         unsigned int clobber;
-        struct lguest *lg = cpu->lg;
 
         /* Copy the guest-specific information into this CPU's "struct
          * lguest_pages". */
@@ -144,7 +143,7 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
          * 0-th argument above, ie "a"). %ebx contains the
          * physical address of the Guest's top-level page
          * directory. */
-        : "0"(pages), "1"(__pa(lg->pgdirs[cpu->cpu_pgd].pgdir))
+        : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir))
         /* We tell gcc that all these registers could change,
          * which means we don't have to save and restore them in
          * the Switcher. */
@@ -217,7 +216,6 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
  * instructions and skip over it. We return true if we did. */
 static int emulate_insn(struct lg_cpu *cpu)
 {
-        struct lguest *lg = cpu->lg;
         u8 insn;
         unsigned int insnlen = 0, in = 0, shift = 0;
         /* The eip contains the *virtual* address of the Guest's instruction:
@@ -231,7 +229,7 @@ static int emulate_insn(struct lg_cpu *cpu)
                 return 0;
 
         /* Decoding x86 instructions is icky. */
-        insn = lgread(lg, physaddr, u8);
+        insn = lgread(cpu, physaddr, u8);
 
         /* 0x66 is an "operand prefix". It means it's using the upper 16 bits
            of the eax register. */
@@ -239,7 +237,7 @@ static int emulate_insn(struct lg_cpu *cpu)
                 shift = 16;
                 /* The instruction is 1 byte so far, read the next byte. */
                 insnlen = 1;
-                insn = lgread(lg, physaddr + insnlen, u8);
+                insn = lgread(cpu, physaddr + insnlen, u8);
         }
 
         /* We can ignore the lower bit for the moment and decode the 4 opcodes
@@ -283,7 +281,6 @@ static int emulate_insn(struct lg_cpu *cpu)
 /*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
 void lguest_arch_handle_trap(struct lg_cpu *cpu)
 {
-        struct lguest *lg = cpu->lg;
         switch (cpu->regs->trapnum) {
         case 13: /* We've intercepted a General Protection Fault. */
                 /* Check if this was one of those annoying IN or OUT
@@ -315,9 +312,10 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
                  * Note that if the Guest were really messed up, this could
                  * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
                  * lg->lguest_data could be NULL */
-                if (lg->lguest_data &&
-                    put_user(cpu->arch.last_pagefault, &lg->lguest_data->cr2))
-                        kill_guest(lg, "Writing cr2");
+                if (cpu->lg->lguest_data &&
+                    put_user(cpu->arch.last_pagefault,
+                             &cpu->lg->lguest_data->cr2))
+                        kill_guest(cpu, "Writing cr2");
                 break;
         case 7: /* We've intercepted a Device Not Available fault. */
                 /* If the Guest doesn't want to know, we already restored the
@@ -345,7 +343,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
                 /* If the Guest doesn't have a handler (either it hasn't
                  * registered any yet, or it's one of the faults we don't let
                  * it handle), it dies with a cryptic error message. */
-                kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
+                kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)",
                            cpu->regs->trapnum, cpu->regs->eip,
                            cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
                            : cpu->regs->errcode);
@@ -514,11 +512,11 @@ int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
 {
         u32 tsc_speed;
-        struct lguest *lg = cpu->lg;
 
         /* The pointer to the Guest's "struct lguest_data" is the only
          * argument. We check that address now. */
-        if (!lguest_address_ok(lg, cpu->hcall->arg1, sizeof(*lg->lguest_data)))
+        if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1,
+                               sizeof(*cpu->lg->lguest_data)))
                 return -EFAULT;
 
         /* Having checked it, we simply set lg->lguest_data to point straight
@@ -526,7 +524,7 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
          * copy_to_user/from_user from now on, instead of lgread/write. I put
          * this in to show that I'm not immune to writing stupid
          * optimizations. */
-        lg->lguest_data = lg->mem_base + cpu->hcall->arg1;
+        cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1;
 
         /* We insist that the Time Stamp Counter exist and doesn't change with
          * cpu frequency. Some devious chip manufacturers decided that TSC
@@ -539,12 +537,12 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
                 tsc_speed = tsc_khz;
         else
                 tsc_speed = 0;
-        if (put_user(tsc_speed, &lg->lguest_data->tsc_khz))
+        if (put_user(tsc_speed, &cpu->lg->lguest_data->tsc_khz))
                 return -EFAULT;
 
         /* The interrupt code might not like the system call vector. */
-        if (!check_syscall_vector(lg))
-                kill_guest(lg, "bad syscall vector");
+        if (!check_syscall_vector(cpu->lg))
+                kill_guest(cpu, "bad syscall vector");
 
         return 0;
 }