 arch/x86/xen/mmu.c        | 55 +++++++++++++++++++++++++++++++++++--------------------
 arch/x86/xen/multicalls.c | 40 ++++++++++++++++++++++++++++++++++------
 arch/x86/xen/multicalls.h | 12 ++++++++++++
 3 files changed, 81 insertions(+), 26 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 846dad7d54a5..f6b8225c2a0b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -230,18 +230,35 @@ static bool page_pinned(void *ptr)
 	return PagePinned(page);
 }
 
-void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
+static void extend_mmu_update(const struct mmu_update *update)
 {
 	struct multicall_space mcs;
 	struct mmu_update *u;
 
-	preempt_disable();
+	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
+
+	if (mcs.mc != NULL)
+		mcs.mc->args[1]++;
+	else {
+		mcs = __xen_mc_entry(sizeof(*u));
+		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+	}
 
-	mcs = xen_mc_entry(sizeof(*u));
 	u = mcs.args;
-	u->ptr = virt_to_machine(ptr).maddr;
-	u->val = pmd_val_ma(val);
-	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+	*u = *update;
+}
+
+void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
+{
+	struct mmu_update u;
+
+	preempt_disable();
+
+	xen_mc_batch();
+
+	u.ptr = virt_to_machine(ptr).maddr;
+	u.val = pmd_val_ma(val);
+	extend_mmu_update(&u);
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
@@ -332,14 +349,13 @@ pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t
 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 				 pte_t *ptep, pte_t pte)
 {
-	struct multicall_space mcs;
-	struct mmu_update *u;
+	struct mmu_update u;
 
-	mcs = xen_mc_entry(sizeof(*u));
-	u = mcs.args;
-	u->ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
-	u->val = pte_val_ma(pte);
-	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+	xen_mc_batch();
+
+	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+	u.val = pte_val_ma(pte);
+	extend_mmu_update(&u);
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
@@ -396,16 +412,15 @@ pmdval_t xen_pmd_val(pmd_t pmd)
 
 void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
-	struct multicall_space mcs;
-	struct mmu_update *u;
+	struct mmu_update u;
 
 	preempt_disable();
 
-	mcs = xen_mc_entry(sizeof(*u));
-	u = mcs.args;
-	u->ptr = virt_to_machine(ptr).maddr;
-	u->val = pud_val_ma(val);
-	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+	xen_mc_batch();
+
+	u.ptr = virt_to_machine(ptr).maddr;
+	u.val = pud_val_ma(val);
+	extend_mmu_update(&u);
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
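In the mmu.c changes above, every PMD/PUD/PTE write is funnelled through the new extend_mmu_update() helper: it first asks xen_mc_extend_args() for room in the previous __HYPERVISOR_mmu_update multicall and, if that succeeds, appends one more struct mmu_update to that entry's argument area and bumps its count argument (mcs.mc->args[1]++); only otherwise does it start a fresh entry. Under PARAVIRT_LAZY_MMU the batch is flushed later by xen_mc_issue(), so adjacent page-table writes collapse into a single hypercall. The user-space sketch below models only that coalescing decision; the types, buffer sizes and the queue_update() helper are illustrative stand-ins, not the kernel's code.

/*
 * Hedged, user-space model of the coalescing idea in extend_mmu_update():
 * if the previous queued "hypercall" is an mmu_update, append another
 * update record to its argument area and bump its count; otherwise start
 * a new entry.  All names and sizes are illustrative, not the kernel's.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define OP_MMU_UPDATE	1

struct update { uint64_t ptr, val; };	/* stand-in for struct mmu_update */

struct entry {
	int op;			/* which hypercall this entry represents */
	unsigned int count;	/* models mcs.mc->args[1], the update count */
};

static struct entry entries[32];
static unsigned int nentries;
static unsigned char args[32 * 16];	/* byte-granular, like the new MC_ARGS */
static unsigned int argidx;

static void queue_update(const struct update *u)
{
	struct entry *e = NULL;

	/* Try to extend the previous entry (cf. xen_mc_extend_args()). */
	if (nentries > 0 &&
	    entries[nentries - 1].op == OP_MMU_UPDATE &&
	    argidx + sizeof(*u) <= sizeof(args))
		e = &entries[nentries - 1];

	if (e) {
		e->count++;			/* models mcs.mc->args[1]++ */
	} else {
		e = &entries[nentries++];	/* models __xen_mc_entry() */
		e->op = OP_MMU_UPDATE;
		e->count = 1;
	}

	memcpy(&args[argidx], u, sizeof(*u));	/* models *u = *update */
	argidx += sizeof(*u);
}

int main(void)
{
	struct update a = { 0x1000, 1 }, b = { 0x2000, 2 }, c = { 0x3000, 3 };

	queue_update(&a);
	queue_update(&b);
	queue_update(&c);

	/* With coalescing, all three updates share one entry: count == 3. */
	printf("entries=%u count=%u argidx=%u\n",
	       nentries, entries[0].count, argidx);
	return 0;
}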
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 5791eb2e3750..3c63c4da7ed1 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -29,14 +29,14 @@
 #define MC_DEBUG 1
 
 #define MC_BATCH	32
-#define MC_ARGS		(MC_BATCH * 16 / sizeof(u64))
+#define MC_ARGS		(MC_BATCH * 16)
 
 struct mc_buffer {
 	struct multicall_entry entries[MC_BATCH];
 #if MC_DEBUG
 	struct multicall_entry debug[MC_BATCH];
 #endif
-	u64 args[MC_ARGS];
+	unsigned char args[MC_ARGS];
 	struct callback {
 		void (*fn)(void *);
 		void *data;
@@ -107,20 +107,48 @@ struct multicall_space __xen_mc_entry(size_t args)
 {
 	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
 	struct multicall_space ret;
-	unsigned argspace = (args + sizeof(u64) - 1) / sizeof(u64);
+	unsigned argidx = roundup(b->argidx, sizeof(u64));
 
 	BUG_ON(preemptible());
-	BUG_ON(argspace > MC_ARGS);
+	BUG_ON(b->argidx > MC_ARGS);
 
 	if (b->mcidx == MC_BATCH ||
-	    (b->argidx + argspace) > MC_ARGS)
+	    (argidx + args) > MC_ARGS) {
 		xen_mc_flush();
+		argidx = roundup(b->argidx, sizeof(u64));
+	}
 
 	ret.mc = &b->entries[b->mcidx];
 	b->mcidx++;
+	ret.args = &b->args[argidx];
+	b->argidx = argidx + args;
+
+	BUG_ON(b->argidx > MC_ARGS);
+	return ret;
+}
+
+struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
+{
+	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+	struct multicall_space ret = { NULL, NULL };
+
+	BUG_ON(preemptible());
+	BUG_ON(b->argidx > MC_ARGS);
+
+	if (b->mcidx == 0)
+		return ret;
+
+	if (b->entries[b->mcidx - 1].op != op)
+		return ret;
+
+	if ((b->argidx + size) > MC_ARGS)
+		return ret;
+
+	ret.mc = &b->entries[b->mcidx - 1];
 	ret.args = &b->args[b->argidx];
-	b->argidx += argspace;
+	b->argidx += size;
 
+	BUG_ON(b->argidx > MC_ARGS);
 	return ret;
 }
 
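The multicalls.c side is what makes that extension possible: MC_ARGS is now a byte count and args[] a byte array, so an entry's argument block can grow by arbitrary byte amounts. In exchange, __xen_mc_entry() has to round b->argidx up to sizeof(u64) so each fresh entry's arguments start 8-byte aligned (and re-round after a flush resets the buffer), whereas xen_mc_extend_args() deliberately appends at the current, possibly unaligned, index so extensions pack tightly. A minimal sketch of that index arithmetic, assuming a local ROUNDUP macro that behaves like the kernel's roundup() for these unsigned inputs:

#include <stdio.h>

/* Same result as the kernel's roundup() for positive integer inputs. */
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int argidx = 0;
	unsigned int start;

	/* Fresh entry asking for 12 bytes: starts at 0, leaves argidx at 12. */
	start = ROUNDUP(argidx, 8);
	argidx = start + 12;
	printf("entry 1: start=%u argidx=%u\n", start, argidx);	/* 0, 12 */

	/* Extending that entry by 16 bytes packs at 12 with no padding. */
	argidx += 16;
	printf("extend : argidx=%u\n", argidx);			/* 28 */

	/* The next fresh entry is re-aligned and starts at 32. */
	start = ROUNDUP(argidx, 8);
	printf("entry 2: start=%u\n", start);			/* 32 */
	return 0;
}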
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 8bae996d99a3..858938241616 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -45,4 +45,16 @@ static inline void xen_mc_issue(unsigned mode)
 /* Set up a callback to be called when the current batch is flushed */
 void xen_mc_callback(void (*fn)(void *), void *data);
 
+/*
+ * Try to extend the arguments of the previous multicall command.  The
+ * previous command's op must match.  If it does, then it attempts to
+ * extend the argument space allocated to the multicall entry by
+ * arg_size bytes.
+ *
+ * The returned multicall_space will return with mc pointing to the
+ * command on success, or NULL on failure, and args pointing to the
+ * newly allocated space.
+ */
+struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size);
+
 #endif /* _XEN_MULTICALLS_H */
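The comment added to multicalls.h spells out the contract a caller must honour: xen_mc_extend_args() refuses to extend when nothing is batched yet, when the previous entry's op differs, or when the args buffer cannot take size more bytes, and it signals refusal by returning mc == NULL so the caller can fall back to __xen_mc_entry() (exactly what extend_mmu_update() does in mmu.c above). The stand-alone sketch below exercises just those refusal paths; the entry/args arrays and the try_extend() name are hypothetical, not kernel API.

#include <stdio.h>
#include <stddef.h>

struct mc_entry { unsigned long op; };

struct mc_space {
	struct mc_entry *mc;	/* NULL means "could not extend" */
	void *args;
};

static struct mc_entry entries[32];
static unsigned int mcidx;
static unsigned char args[32 * 16];
static unsigned int argidx;

static struct mc_space try_extend(unsigned long op, size_t size)
{
	struct mc_space ret = { NULL, NULL };

	if (mcidx == 0)				/* nothing batched yet */
		return ret;
	if (entries[mcidx - 1].op != op)	/* previous op must match */
		return ret;
	if (argidx + size > sizeof(args))	/* no room left for args */
		return ret;

	ret.mc = &entries[mcidx - 1];		/* extend the previous command */
	ret.args = &args[argidx];		/* newly reserved argument space */
	argidx += size;
	return ret;
}

int main(void)
{
	struct mc_space s;

	/* Empty batch: extension is refused. */
	s = try_extend(1, 16);
	printf("empty batch: %s\n", s.mc ? "extended" : "refused");

	/* Queue one op-1 entry; extending with the same op then succeeds... */
	entries[mcidx++].op = 1;
	s = try_extend(1, 16);
	printf("matching op: %s\n", s.mc ? "extended" : "refused");

	/* ...but extending with a different op is refused. */
	s = try_extend(2, 16);
	printf("op mismatch : %s\n", s.mc ? "extended" : "refused");
	return 0;
}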