Diffstat (limited to 'arch/x86/xen/multicalls.c')
-rw-r--r--   arch/x86/xen/multicalls.c   90
1 file changed, 90 insertions, 0 deletions
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
new file mode 100644
index 000000000000..c837e8e463db
--- /dev/null
+++ b/arch/x86/xen/multicalls.c
@@ -0,0 +1,90 @@
/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with storage for any data the
 * arguments point to (for passing pointers to structures, etc).
 * When the multicall is actually issued, all the space for the
 * commands and allocated memory is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers gets full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back; it will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/percpu.h>
#include <linux/hardirq.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"

#define MC_BATCH	32
#define MC_ARGS		(MC_BATCH * 16 / sizeof(u64))

struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];
	u64 args[MC_ARGS];
	unsigned mcidx, argidx;
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

void xen_mc_flush(void)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	int ret = 0;
	unsigned long flags;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	if (b->mcidx) {
		int i;

		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;
		b->mcidx = 0;
		b->argidx = 0;
	} else
		BUG_ON(b->argidx != 0);

	local_irq_restore(flags);

	BUG_ON(ret);
}

struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret;
	unsigned argspace = (args + sizeof(u64) - 1) / sizeof(u64);

	BUG_ON(preemptible());
	BUG_ON(argspace > MC_ARGS);

	if (b->mcidx == MC_BATCH ||
	    (b->argidx + argspace) > MC_ARGS)
		xen_mc_flush();

	ret.mc = &b->entries[b->mcidx];
	b->mcidx++;
	ret.args = &b->args[b->argidx];
	b->argidx += argspace;

	return ret;
}
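
For readers new to the interface described in the header comment, here is a minimal caller-side sketch using only the two functions this patch adds, __xen_mc_entry() and xen_mc_flush(). It is illustrative, not part of the patch: the MMU-update payload, the explicit preempt/irq handling and the includes are assumptions based on the Xen interface headers, and real callers are expected to go through the helpers in multicalls.h (not shown in this diff).

/* Illustrative only: queue one MMU-update hypercall through the
 * batching interface above, then force it out with xen_mc_flush().
 * struct mmu_update, __HYPERVISOR_mmu_update and DOMID_SELF are
 * assumed to come from the Xen interface headers; the preempt/irq
 * handling mirrors the BUG_ON(preemptible()) checks in multicalls.c. */
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <xen/interface/xen.h>

#include "multicalls.h"	/* struct multicall_space, __xen_mc_entry(), xen_mc_flush() */

static void example_batched_mmu_update(u64 ptr, u64 val)
{
	struct multicall_space mcs;
	struct mmu_update *u;
	unsigned long flags;

	preempt_disable();	/* per-cpu buffer: no migration while queued */
	local_irq_save(flags);	/* no queueing from interrupt context */

	/* Reserve one multicall slot plus space for the mmu_update
	   argument; this may flush an already-full batch. */
	mcs = __xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = ptr;
	u->val = val;

	mcs.mc->op = __HYPERVISOR_mmu_update;
	mcs.mc->args[0] = (unsigned long)u;
	mcs.mc->args[1] = 1;		/* one request */
	mcs.mc->args[2] = 0;		/* no success count wanted */
	mcs.mc->args[3] = DOMID_SELF;

	/* Issue the whole pending batch; BUGs if any entry failed. */
	xen_mc_flush();

	local_irq_restore(flags);
	preempt_enable();
}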
