Diffstat (limited to 'drivers/lguest/hypercalls.c')
-rw-r--r--  drivers/lguest/hypercalls.c | 106
1 file changed, 55 insertions(+), 51 deletions(-)
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index b478affe8f91..0f2cb4fd7c69 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -23,13 +23,14 @@
 #include <linux/uaccess.h>
 #include <linux/syscalls.h>
 #include <linux/mm.h>
+#include <linux/ktime.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include "lg.h"
 
 /*H:120 This is the core hypercall routine: where the Guest gets what it wants.
  * Or gets killed. Or, in the case of LHCALL_CRASH, both. */
-static void do_hcall(struct lguest *lg, struct hcall_args *args)
+static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 {
 	switch (args->arg0) {
 	case LHCALL_FLUSH_ASYNC:
@@ -39,60 +40,62 @@ static void do_hcall(struct lguest *lg, struct hcall_args *args)
 	case LHCALL_LGUEST_INIT:
 		/* You can't get here unless you're already initialized. Don't
 		 * do that. */
-		kill_guest(lg, "already have lguest_data");
+		kill_guest(cpu, "already have lguest_data");
 		break;
-	case LHCALL_CRASH: {
-		/* Crash is such a trivial hypercall that we do it in four
+	case LHCALL_SHUTDOWN: {
+		/* Shutdown is such a trivial hypercall that we do it in four
 		 * lines right here. */
 		char msg[128];
 		/* If the lgread fails, it will call kill_guest() itself; the
 		 * kill_guest() with the message will be ignored. */
-		__lgread(lg, msg, args->arg1, sizeof(msg));
+		__lgread(cpu, msg, args->arg1, sizeof(msg));
 		msg[sizeof(msg)-1] = '\0';
-		kill_guest(lg, "CRASH: %s", msg);
+		kill_guest(cpu, "CRASH: %s", msg);
+		if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
+			cpu->lg->dead = ERR_PTR(-ERESTART);
 		break;
 	}
 	case LHCALL_FLUSH_TLB:
 		/* FLUSH_TLB comes in two flavors, depending on the
 		 * argument: */
 		if (args->arg1)
-			guest_pagetable_clear_all(lg);
+			guest_pagetable_clear_all(cpu);
 		else
-			guest_pagetable_flush_user(lg);
+			guest_pagetable_flush_user(cpu);
 		break;
 
 	/* All these calls simply pass the arguments through to the right
 	 * routines. */
 	case LHCALL_NEW_PGTABLE:
-		guest_new_pagetable(lg, args->arg1);
+		guest_new_pagetable(cpu, args->arg1);
 		break;
 	case LHCALL_SET_STACK:
-		guest_set_stack(lg, args->arg1, args->arg2, args->arg3);
+		guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
 		break;
 	case LHCALL_SET_PTE:
-		guest_set_pte(lg, args->arg1, args->arg2, __pte(args->arg3));
+		guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
 		break;
 	case LHCALL_SET_PMD:
-		guest_set_pmd(lg, args->arg1, args->arg2);
+		guest_set_pmd(cpu->lg, args->arg1, args->arg2);
 		break;
 	case LHCALL_SET_CLOCKEVENT:
-		guest_set_clockevent(lg, args->arg1);
+		guest_set_clockevent(cpu, args->arg1);
 		break;
 	case LHCALL_TS:
 		/* This sets the TS flag, as we saw used in run_guest(). */
-		lg->ts = args->arg1;
+		cpu->ts = args->arg1;
 		break;
 	case LHCALL_HALT:
 		/* Similarly, this sets the halted flag for run_guest(). */
-		lg->halted = 1;
+		cpu->halted = 1;
 		break;
 	case LHCALL_NOTIFY:
-		lg->pending_notify = args->arg1;
+		cpu->pending_notify = args->arg1;
 		break;
 	default:
 		/* It should be an architecture-specific hypercall. */
-		if (lguest_arch_do_hcall(lg, args))
-			kill_guest(lg, "Bad hypercall %li\n", args->arg0);
+		if (lguest_arch_do_hcall(cpu, args))
+			kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
 	}
 }
 /*:*/
@@ -104,13 +107,13 @@ static void do_hcall(struct lguest *lg, struct hcall_args *args)
  * Guest put them in the ring, but we also promise the Guest that they will
  * happen before any normal hypercall (which is why we check this before
  * checking for a normal hcall). */
-static void do_async_hcalls(struct lguest *lg)
+static void do_async_hcalls(struct lg_cpu *cpu)
 {
 	unsigned int i;
 	u8 st[LHCALL_RING_SIZE];
 
 	/* For simplicity, we copy the entire call status array in at once. */
-	if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
+	if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st)))
 		return;
 
 	/* We process "struct lguest_data"s hcalls[] ring once. */
@@ -119,7 +122,7 @@ static void do_async_hcalls(struct lguest *lg)
 		/* We remember where we were up to from last time. This makes
 		 * sure that the hypercalls are done in the order the Guest
 		 * places them in the ring. */
-		unsigned int n = lg->next_hcall;
+		unsigned int n = cpu->next_hcall;
 
 		/* 0xFF means there's no call here (yet). */
 		if (st[n] == 0xFF)
@@ -127,65 +130,65 @@ static void do_async_hcalls(struct lguest *lg)
 
 		/* OK, we have hypercall. Increment the "next_hcall" cursor,
 		 * and wrap back to 0 if we reach the end. */
-		if (++lg->next_hcall == LHCALL_RING_SIZE)
-			lg->next_hcall = 0;
+		if (++cpu->next_hcall == LHCALL_RING_SIZE)
+			cpu->next_hcall = 0;
 
 		/* Copy the hypercall arguments into a local copy of
 		 * the hcall_args struct. */
-		if (copy_from_user(&args, &lg->lguest_data->hcalls[n],
+		if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n],
 				   sizeof(struct hcall_args))) {
-			kill_guest(lg, "Fetching async hypercalls");
+			kill_guest(cpu, "Fetching async hypercalls");
 			break;
 		}
 
 		/* Do the hypercall, same as a normal one. */
-		do_hcall(lg, &args);
+		do_hcall(cpu, &args);
 
 		/* Mark the hypercall done. */
-		if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
-			kill_guest(lg, "Writing result for async hypercall");
+		if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) {
+			kill_guest(cpu, "Writing result for async hypercall");
 			break;
 		}
 
 		/* Stop doing hypercalls if they want to notify the Launcher:
 		 * it needs to service this first. */
-		if (lg->pending_notify)
+		if (cpu->pending_notify)
 			break;
 	}
 }
 
 /* Last of all, we look at what happens first of all. The very first time the
  * Guest makes a hypercall, we end up here to set things up: */
-static void initialize(struct lguest *lg)
+static void initialize(struct lg_cpu *cpu)
 {
 	/* You can't do anything until you're initialized. The Guest knows the
 	 * rules, so we're unforgiving here. */
-	if (lg->hcall->arg0 != LHCALL_LGUEST_INIT) {
-		kill_guest(lg, "hypercall %li before INIT", lg->hcall->arg0);
+	if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) {
+		kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0);
 		return;
 	}
 
-	if (lguest_arch_init_hypercalls(lg))
-		kill_guest(lg, "bad guest page %p", lg->lguest_data);
+	if (lguest_arch_init_hypercalls(cpu))
+		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
 
 	/* The Guest tells us where we're not to deliver interrupts by putting
 	 * the range of addresses into "struct lguest_data". */
-	if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start)
-	    || get_user(lg->noirq_end, &lg->lguest_data->noirq_end))
-		kill_guest(lg, "bad guest page %p", lg->lguest_data);
+	if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
+	    || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
+		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
 
 	/* We write the current time into the Guest's data page once so it can
 	 * set its clock. */
-	write_timestamp(lg);
+	write_timestamp(cpu);
 
 	/* page_tables.c will also do some setup. */
-	page_table_guest_data_init(lg);
+	page_table_guest_data_init(cpu);
 
 	/* This is the one case where the above accesses might have been the
 	 * first write to a Guest page. This may have caused a copy-on-write
 	 * fault, but the old page might be (read-only) in the Guest
 	 * pagetable. */
-	guest_pagetable_clear_all(lg);
+	guest_pagetable_clear_all(cpu);
 }
 
 /*H:100
@@ -194,27 +197,27 @@ static void initialize(struct lguest *lg)
  * Remember from the Guest, hypercalls come in two flavors: normal and
  * asynchronous. This file handles both of types.
  */
-void do_hypercalls(struct lguest *lg)
+void do_hypercalls(struct lg_cpu *cpu)
 {
 	/* Not initialized yet? This hypercall must do it. */
-	if (unlikely(!lg->lguest_data)) {
+	if (unlikely(!cpu->lg->lguest_data)) {
 		/* Set up the "struct lguest_data" */
-		initialize(lg);
+		initialize(cpu);
 		/* Hcall is done. */
-		lg->hcall = NULL;
+		cpu->hcall = NULL;
 		return;
 	}
 
 	/* The Guest has initialized.
 	 *
 	 * Look in the hypercall ring for the async hypercalls: */
-	do_async_hcalls(lg);
+	do_async_hcalls(cpu);
 
 	/* If we stopped reading the hypercall ring because the Guest did a
 	 * NOTIFY to the Launcher, we want to return now. Otherwise we do
 	 * the hypercall. */
-	if (!lg->pending_notify) {
-		do_hcall(lg, lg->hcall);
+	if (!cpu->pending_notify) {
+		do_hcall(cpu, cpu->hcall);
 		/* Tricky point: we reset the hcall pointer to mark the
 		 * hypercall as "done". We use the hcall pointer rather than
 		 * the trap number to indicate a hypercall is pending.
@@ -225,16 +228,17 @@ void do_hypercalls(struct lguest *lg)
 		 * Launcher, the run_guest() loop will exit without running the
 		 * Guest. When it comes back it would try to re-run the
 		 * hypercall. */
-		lg->hcall = NULL;
+		cpu->hcall = NULL;
 	}
 }
 
 /* This routine supplies the Guest with time: it's used for wallclock time at
  * initial boot and as a rough time source if the TSC isn't available. */
-void write_timestamp(struct lguest *lg)
+void write_timestamp(struct lg_cpu *cpu)
 {
 	struct timespec now;
 	ktime_get_real_ts(&now);
-	if (copy_to_user(&lg->lguest_data->time, &now, sizeof(struct timespec)))
-		kill_guest(lg, "Writing timestamp");
+	if (copy_to_user(&cpu->lg->lguest_data->time,
+			 &now, sizeof(struct timespec)))
+		kill_guest(cpu, "Writing timestamp");
 }
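The Host-side ring drain in do_async_hcalls() above has a Guest-side counterpart that this patch does not touch. As a rough, hypothetical sketch only: the field and constant names (hcalls[], hcall_status[], the arg0..arg3 members of struct hcall_args, LHCALL_RING_SIZE, the 0xFF "slot free" marker) are taken from the hunks above, while the function name, the static cursor, and the header assumptions are invented for illustration.

/*
 * Hypothetical Guest-side sketch (not part of this patch) of how an async
 * hypercall lands in the ring that do_async_hcalls() drains.  wmb() comes
 * from the arch barrier headers; struct lguest_data and LHCALL_RING_SIZE
 * are assumed to come from linux/lguest.h.
 */
#include <linux/lguest.h>

static void queue_async_hcall(struct lguest_data *data, unsigned long call,
			      unsigned long arg1, unsigned long arg2,
			      unsigned long arg3)
{
	static unsigned int next_call;	/* Guest-side cursor into the ring */

	/* The Host writes 0xFF back into hcall_status[] once a slot has been
	 * processed, so anything else means the slot is still pending. */
	if (data->hcall_status[next_call] != 0xFF)
		return;		/* ring full: fall back to a normal hypercall */

	data->hcalls[next_call].arg0 = call;
	data->hcalls[next_call].arg1 = arg1;
	data->hcalls[next_call].arg2 = arg2;
	data->hcalls[next_call].arg3 = arg3;

	/* The arguments must be visible before the slot is marked in use:
	 * the Host snapshots hcall_status[] first, then reads hcalls[n]. */
	wmb();
	data->hcall_status[next_call] = 0;

	/* Advance and wrap, mirroring cpu->next_hcall on the Host side. */
	if (++next_call == LHCALL_RING_SIZE)
		next_call = 0;
}

On the Host side the same slot is then picked up in order via cpu->next_hcall, executed through do_hcall(), and released by writing 0xFF back, exactly as the loop in do_async_hcalls() above shows.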
