author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-11-05 14:39:00 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-11-05 14:39:00 -0500
commit     221d46841b931d0e6b11e6251e482f2afe3974dd (patch)
tree       feb33999f71a84003f4ac752300c81f47f9e272f /arch/x86
parent     4d20826ffb6fa80c71b85d2cb858ae400a59a4d5 (diff)
parent     633872b980f55f40a5e7de374f26970e41e2137b (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest:
lguest: tidy up documentation
kernel/futex.c: make 3 functions static
unexport access_process_vm
lguest: make async_hcall() static
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/lguest/boot.c  69
1 file changed, 34 insertions(+), 35 deletions(-)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index a55b0902f9d3..92c56117eae5 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -93,38 +93,7 @@ struct lguest_data lguest_data = {
 };
 static cycle_t clock_base;
 
-/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
- * real optimization trick!
- *
- * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
- * them as a batch when lazy_mode is eventually turned off. Because hypercalls
- * are reasonably expensive, batching them up makes sense. For example, a
- * large munmap might update dozens of page table entries: that code calls
- * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
- * lguest_leave_lazy_mode().
- *
- * So, when we're in lazy mode, we call async_hypercall() to store the call for
- * future processing. When lazy mode is turned off we issue a hypercall to
- * flush the stored calls.
- */
-static void lguest_leave_lazy_mode(void)
-{
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
-	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-}
-
-static void lazy_hcall(unsigned long call,
-		       unsigned long arg1,
-		       unsigned long arg2,
-		       unsigned long arg3)
-{
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-		hcall(call, arg1, arg2, arg3);
-	else
-		async_hcall(call, arg1, arg2, arg3);
-}
-
-/* async_hcall() is pretty simple: I'm quite proud of it really. We have a
+/*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a
  * ring buffer of stored hypercalls which the Host will run though next time we
  * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
  * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
@@ -134,8 +103,8 @@ static void lazy_hcall(unsigned long call,
  * full and we just make the hypercall directly. This has the nice side
  * effect of causing the Host to run all the stored calls in the ring buffer
  * which empties it for next time! */
-void async_hcall(unsigned long call,
-		 unsigned long arg1, unsigned long arg2, unsigned long arg3)
+static void async_hcall(unsigned long call, unsigned long arg1,
+			unsigned long arg2, unsigned long arg3)
 {
 	/* Note: This code assumes we're uniprocessor. */
 	static unsigned int next_call;
@@ -161,7 +130,37 @@ void async_hcall(unsigned long call,
 	}
 	local_irq_restore(flags);
 }
-/*:*/
+
+/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
+ * real optimization trick!
+ *
+ * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
+ * them as a batch when lazy_mode is eventually turned off. Because hypercalls
+ * are reasonably expensive, batching them up makes sense. For example, a
+ * large munmap might update dozens of page table entries: that code calls
+ * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
+ * lguest_leave_lazy_mode().
+ *
+ * So, when we're in lazy mode, we call async_hcall() to store the call for
+ * future processing. */
+static void lazy_hcall(unsigned long call,
+		       unsigned long arg1,
+		       unsigned long arg2,
+		       unsigned long arg3)
+{
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
+		hcall(call, arg1, arg2, arg3);
+	else
+		async_hcall(call, arg1, arg2, arg3);
+}
+
+/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
+ * issue a hypercall to flush any stored calls. */
+static void lguest_leave_lazy_mode(void)
+{
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+}
 
 /*G:033
  * After that diversion we return to our first native-instruction
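
For readers following the G:035/G:037 comments in the diff above, the sketch below restates the batching scheme as a self-contained user-space C program. It is an illustration only, not lguest code: names such as ring_entry, pending[], queue_hcall() and maybe_lazy_hcall() are invented for this example, and the real guest instead shares its ring with the Host through struct lguest_data and flushes it with the LHCALL_FLUSH_ASYNC hypercall shown in lguest_leave_lazy_mode().

/*
 * Illustrative sketch of deferred-call batching (not lguest code).
 * Build with: cc -std=c99 -o batch batch.c
 */
#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 64

struct ring_entry {
	unsigned long call, arg1, arg2, arg3;
	bool ready;			/* stands in for the "hcall_status" word */
};

static struct ring_entry pending[RING_SIZE];
static unsigned int next_call;
static bool lazy_mode;

/* Stand-in for the real hypercall trap into the Host. */
static void do_hcall(unsigned long call, unsigned long a1,
		     unsigned long a2, unsigned long a3)
{
	printf("hcall %lu(%lu, %lu, %lu)\n", call, a1, a2, a3);
}

/* The Host would do this the next time it sees a real hypercall. */
static void flush_pending(void)
{
	for (unsigned int i = 0; i < RING_SIZE; i++) {
		if (pending[i].ready) {
			do_hcall(pending[i].call, pending[i].arg1,
				 pending[i].arg2, pending[i].arg3);
			pending[i].ready = false;
		}
	}
}

/* Queue a call; if the ring is full, fall back to a direct call, which in
 * the real scheme also causes the Host to drain the ring. */
static void queue_hcall(unsigned long call, unsigned long a1,
			unsigned long a2, unsigned long a3)
{
	if (pending[next_call].ready) {		/* ring full */
		flush_pending();
		do_hcall(call, a1, a2, a3);
		return;
	}
	pending[next_call] = (struct ring_entry){ call, a1, a2, a3, true };
	if (++next_call == RING_SIZE)
		next_call = 0;
}

/* lazy_hcall()'s decision: defer while batching, otherwise call directly. */
static void maybe_lazy_hcall(unsigned long call, unsigned long a1,
			     unsigned long a2, unsigned long a3)
{
	if (!lazy_mode)
		do_hcall(call, a1, a2, a3);
	else
		queue_hcall(call, a1, a2, a3);
}

int main(void)
{
	lazy_mode = true;
	for (unsigned long i = 0; i < 3; i++)
		maybe_lazy_hcall(1, i, 0, 0);	/* only queued, nothing printed yet */
	lazy_mode = false;
	flush_pending();			/* analogue of LHCALL_FLUSH_ASYNC */
	return 0;
}

The point of the pattern is visible in main(): while lazy_mode is set the three calls are only stored in the ring, and nothing reaches the "Host" until the explicit flush that stands in for the LHCALL_FLUSH_ASYNC issued by lguest_leave_lazy_mode() in the patch.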