Diffstat (limited to 'drivers/lguest/hypercalls.c')
-rw-r--r--	drivers/lguest/hypercalls.c | 104
1 files changed, 30 insertions, 74 deletions
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 0175a9f03347..2859a7687288 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -25,17 +25,13 @@
 #include <linux/mm.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <irq_vectors.h>
 #include "lg.h"
 
-/*H:120 This is the core hypercall routine: where the Guest gets what it
- * wants. Or gets killed. Or, in the case of LHCALL_CRASH, both.
- *
- * Remember from the Guest: %eax == which call to make, and the arguments are
- * packed into %edx, %ebx and %ecx if needed. */
-static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
+/*H:120 This is the core hypercall routine: where the Guest gets what it wants.
+ * Or gets killed. Or, in the case of LHCALL_CRASH, both. */
+static void do_hcall(struct lguest *lg, struct hcall_args *args)
 {
-	switch (regs->eax) {
+	switch (args->arg0) {
 	case LHCALL_FLUSH_ASYNC:
 		/* This call does nothing, except by breaking out of the Guest
 		 * it makes us process all the asynchronous hypercalls. */
@@ -51,7 +47,7 @@ static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
 		char msg[128];
 		/* If the lgread fails, it will call kill_guest() itself; the
 		 * kill_guest() with the message will be ignored. */
-		lgread(lg, msg, regs->edx, sizeof(msg));
+		lgread(lg, msg, args->arg1, sizeof(msg));
 		msg[sizeof(msg)-1] = '\0';
 		kill_guest(lg, "CRASH: %s", msg);
 		break;
@@ -59,7 +55,7 @@ static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
 	case LHCALL_FLUSH_TLB:
 		/* FLUSH_TLB comes in two flavors, depending on the
 		 * argument: */
-		if (regs->edx)
+		if (args->arg1)
 			guest_pagetable_clear_all(lg);
 		else
 			guest_pagetable_flush_user(lg);
@@ -71,55 +67,47 @@ static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
 		 * it here. This can legitimately fail, since we currently
 		 * place a limit on the number of DMA pools a Guest can have.
 		 * So we return true or false from this call. */
-		regs->eax = bind_dma(lg, regs->edx, regs->ebx,
-				     regs->ecx >> 8, regs->ecx & 0xFF);
+		args->arg0 = bind_dma(lg, args->arg1, args->arg2,
+				      args->arg3 >> 8, args->arg3 & 0xFF);
 		break;
 
 	/* All these calls simply pass the arguments through to the right
 	 * routines. */
 	case LHCALL_SEND_DMA:
-		send_dma(lg, regs->edx, regs->ebx);
-		break;
-	case LHCALL_LOAD_GDT:
-		load_guest_gdt(lg, regs->edx, regs->ebx);
-		break;
-	case LHCALL_LOAD_IDT_ENTRY:
-		load_guest_idt_entry(lg, regs->edx, regs->ebx, regs->ecx);
+		send_dma(lg, args->arg1, args->arg2);
 		break;
 	case LHCALL_NEW_PGTABLE:
-		guest_new_pagetable(lg, regs->edx);
+		guest_new_pagetable(lg, args->arg1);
 		break;
 	case LHCALL_SET_STACK:
-		guest_set_stack(lg, regs->edx, regs->ebx, regs->ecx);
+		guest_set_stack(lg, args->arg1, args->arg2, args->arg3);
 		break;
 	case LHCALL_SET_PTE:
-		guest_set_pte(lg, regs->edx, regs->ebx, mkgpte(regs->ecx));
+		guest_set_pte(lg, args->arg1, args->arg2, mkgpte(args->arg3));
 		break;
 	case LHCALL_SET_PMD:
-		guest_set_pmd(lg, regs->edx, regs->ebx);
-		break;
-	case LHCALL_LOAD_TLS:
-		guest_load_tls(lg, regs->edx);
+		guest_set_pmd(lg, args->arg1, args->arg2);
 		break;
 	case LHCALL_SET_CLOCKEVENT:
-		guest_set_clockevent(lg, regs->edx);
+		guest_set_clockevent(lg, args->arg1);
 		break;
-
 	case LHCALL_TS:
 		/* This sets the TS flag, as we saw used in run_guest(). */
-		lg->ts = regs->edx;
+		lg->ts = args->arg1;
 		break;
 	case LHCALL_HALT:
 		/* Similarly, this sets the halted flag for run_guest(). */
 		lg->halted = 1;
 		break;
 	default:
-		kill_guest(lg, "Bad hypercall %li\n", regs->eax);
+		if (lguest_arch_do_hcall(lg, args))
+			kill_guest(lg, "Bad hypercall %li\n", args->arg0);
 	}
 }
+/*:*/
 
-/* Asynchronous hypercalls are easy: we just look in the array in the Guest's
- * "struct lguest_data" and see if there are any new ones marked "ready".
+/*H:124 Asynchronous hypercalls are easy: we just look in the array in the
+ * Guest's "struct lguest_data" to see if any new ones are marked "ready".
  *
  * We are careful to do these in order: obviously we respect the order the
  * Guest put them in the ring, but we also promise the Guest that they will
@@ -134,10 +122,9 @@ static void do_async_hcalls(struct lguest *lg)
 	if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
 		return;
 
-
 	/* We process "struct lguest_data"s hcalls[] ring once. */
 	for (i = 0; i < ARRAY_SIZE(st); i++) {
-		struct lguest_regs regs;
+		struct hcall_args args;
 		/* We remember where we were up to from last time. This makes
 		 * sure that the hypercalls are done in the order the Guest
 		 * places them in the ring. */
@@ -152,18 +139,16 @@ static void do_async_hcalls(struct lguest *lg)
 		if (++lg->next_hcall == LHCALL_RING_SIZE)
 			lg->next_hcall = 0;
 
-		/* We copy the hypercall arguments into a fake register
-		 * structure. This makes life simple for do_hcall(). */
-		if (get_user(regs.eax, &lg->lguest_data->hcalls[n].eax)
-		    || get_user(regs.edx, &lg->lguest_data->hcalls[n].edx)
-		    || get_user(regs.ecx, &lg->lguest_data->hcalls[n].ecx)
-		    || get_user(regs.ebx, &lg->lguest_data->hcalls[n].ebx)) {
+		/* Copy the hypercall arguments into a local copy of
+		 * the hcall_args struct. */
+		if (copy_from_user(&args, &lg->lguest_data->hcalls[n],
+				   sizeof(struct hcall_args))) {
 			kill_guest(lg, "Fetching async hypercalls");
 			break;
 		}
 
 		/* Do the hypercall, same as a normal one. */
-		do_hcall(lg, &regs);
+		do_hcall(lg, &args);
 
 		/* Mark the hypercall done. */
 		if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
@@ -182,41 +167,16 @@ static void do_async_hcalls(struct lguest *lg)
  * Guest makes a hypercall, we end up here to set things up: */
 static void initialize(struct lguest *lg)
 {
-	u32 tsc_speed;
 
 	/* You can't do anything until you're initialized. The Guest knows the
 	 * rules, so we're unforgiving here. */
-	if (lg->regs->eax != LHCALL_LGUEST_INIT) {
-		kill_guest(lg, "hypercall %li before LGUEST_INIT",
-			   lg->regs->eax);
+	if (lg->hcall->arg0 != LHCALL_LGUEST_INIT) {
+		kill_guest(lg, "hypercall %li before INIT", lg->hcall->arg0);
 		return;
 	}
 
-	/* We insist that the Time Stamp Counter exist and doesn't change with
-	 * cpu frequency. Some devious chip manufacturers decided that TSC
-	 * changes could be handled in software. I decided that time going
-	 * backwards might be good for benchmarks, but it's bad for users.
-	 *
-	 * We also insist that the TSC be stable: the kernel detects unreliable
-	 * TSCs for its own purposes, and we use that here. */
-	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
-		tsc_speed = tsc_khz;
-	else
-		tsc_speed = 0;
-
-	/* The pointer to the Guest's "struct lguest_data" is the only
-	 * argument. We check that address now. */
-	if (!lguest_address_ok(lg, lg->regs->edx, sizeof(*lg->lguest_data))) {
+	if (lguest_arch_init_hypercalls(lg))
 		kill_guest(lg, "bad guest page %p", lg->lguest_data);
-		return;
-	}
-
-	/* Having checked it, we simply set lg->lguest_data to point straight
-	 * into the Launcher's memory at the right place and then use
-	 * copy_to_user/from_user from now on, instead of lgread/write. I put
-	 * this in to show that I'm not immune to writing stupid
-	 * optimizations. */
-	lg->lguest_data = lg->mem_base + lg->regs->edx;
 
 	/* The Guest tells us where we're not to deliver interrupts by putting
 	 * the range of addresses into "struct lguest_data". */
@@ -224,8 +184,7 @@ static void initialize(struct lguest *lg)
 	    || get_user(lg->noirq_end, &lg->lguest_data->noirq_end)
 	    /* We tell the Guest that it can't use the top 4MB of virtual
 	     * addresses used by the Switcher. */
-	    || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
-	    || put_user(tsc_speed, &lg->lguest_data->tsc_khz))
+	    || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem))
 		kill_guest(lg, "bad guest page %p", lg->lguest_data);
 
 	/* We write the current time into the Guest's data page once now. */
@@ -237,9 +196,6 @@ static void initialize(struct lguest *lg)
 	 * page. */
 	guest_pagetable_clear_all(lg);
 }
-/* Now we've examined the hypercall code; our Guest can make requests. There
- * is one other way we can do things for the Guest, as we see in
- * emulate_insn(). */
 
 /*H:100
  * Hypercalls