author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-05 14:39:00 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-05 14:39:00 -0500
commit    221d46841b931d0e6b11e6251e482f2afe3974dd (patch)
tree      feb33999f71a84003f4ac752300c81f47f9e272f
parent    4d20826ffb6fa80c71b85d2cb858ae400a59a4d5 (diff)
parent    633872b980f55f40a5e7de374f26970e41e2137b (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-lguest:
  lguest: tidy up documentation
  kernel/futex.c: make 3 functions static
  unexport access_process_vm
  lguest: make async_hcall() static
-rw-r--r--  arch/x86/lguest/boot.c           69
-rw-r--r--  include/asm-x86/lguest_hcall.h    3
-rw-r--r--  include/linux/futex.h             4
-rw-r--r--  kernel/futex.c                   11
-rw-r--r--  mm/memory.c                       1
5 files changed, 38 insertions(+), 50 deletions(-)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index a55b0902f9d3..92c56117eae5 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -93,38 +93,7 @@ struct lguest_data lguest_data = {
 };
 static cycle_t clock_base;
 
-/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
- * real optimization trick!
- *
- * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
- * them as a batch when lazy_mode is eventually turned off. Because hypercalls
- * are reasonably expensive, batching them up makes sense. For example, a
- * large munmap might update dozens of page table entries: that code calls
- * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
- * lguest_leave_lazy_mode().
- *
- * So, when we're in lazy mode, we call async_hypercall() to store the call for
- * future processing. When lazy mode is turned off we issue a hypercall to
- * flush the stored calls.
- */
-static void lguest_leave_lazy_mode(void)
-{
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
-	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-}
-
-static void lazy_hcall(unsigned long call,
-		       unsigned long arg1,
-		       unsigned long arg2,
-		       unsigned long arg3)
-{
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
-		hcall(call, arg1, arg2, arg3);
-	else
-		async_hcall(call, arg1, arg2, arg3);
-}
-
-/* async_hcall() is pretty simple: I'm quite proud of it really. We have a
+/*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a
  * ring buffer of stored hypercalls which the Host will run though next time we
  * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
  * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
@@ -134,8 +103,8 @@ static void lazy_hcall(unsigned long call,
  * full and we just make the hypercall directly. This has the nice side
  * effect of causing the Host to run all the stored calls in the ring buffer
  * which empties it for next time! */
-void async_hcall(unsigned long call,
-		 unsigned long arg1, unsigned long arg2, unsigned long arg3)
+static void async_hcall(unsigned long call, unsigned long arg1,
+			unsigned long arg2, unsigned long arg3)
 {
 	/* Note: This code assumes we're uniprocessor. */
 	static unsigned int next_call;
@@ -161,7 +130,37 @@ void async_hcall(unsigned long call,
 	}
 	local_irq_restore(flags);
 }
-/*:*/
+
+/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
+ * real optimization trick!
+ *
+ * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
+ * them as a batch when lazy_mode is eventually turned off. Because hypercalls
+ * are reasonably expensive, batching them up makes sense. For example, a
+ * large munmap might update dozens of page table entries: that code calls
+ * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
+ * lguest_leave_lazy_mode().
+ *
+ * So, when we're in lazy mode, we call async_hcall() to store the call for
+ * future processing. */
+static void lazy_hcall(unsigned long call,
+		       unsigned long arg1,
+		       unsigned long arg2,
+		       unsigned long arg3)
+{
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
+		hcall(call, arg1, arg2, arg3);
+	else
+		async_hcall(call, arg1, arg2, arg3);
+}
+
+/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
+ * issue a hypercall to flush any stored calls. */
+static void lguest_leave_lazy_mode(void)
+{
+	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+}
 
 /*G:033
  * After that diversion we return to our first native-instruction
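
The two G: comments in the hunk above describe the whole mechanism: lazy_hcall() defers hypercalls while paravirt lazy mode is on, async_hcall() parks them in a fixed-size ring whose hcall_status word is 0 when an entry is ready to go, and a full ring simply falls back to a direct hypercall, which drains the ring as a side effect. As a rough illustration, here is a minimal userspace C sketch of that pattern; every name in it (RING_SIZE, direct_hcall, lazy_mode, and so on) is an illustrative stand-in rather than the kernel's, the 0xFF free/consumed marker is assumed from the lguest code of this era, and the real code additionally disables interrupts and issues a write barrier before marking a slot ready.

#include <stdio.h>

/* Illustrative stand-ins only; not the kernel's names or types. */
#define RING_SIZE	64
#define SLOT_FREE	0xFF	/* Host has consumed the entry */
#define SLOT_READY	0	/* entry is ready for the Host to run */

struct stored_call {
	unsigned long call, arg1, arg2, arg3;
	unsigned long status;
};

static struct stored_call ring[RING_SIZE];
static unsigned int next_call;	/* uniprocessor assumption, as in boot.c */
static int lazy_mode;		/* stand-in for paravirt_get_lazy_mode() */

/* Stand-in for a real hypercall: the "Host" first runs every stored
 * call in the ring, which empties it for next time. */
static void direct_hcall(unsigned long call, unsigned long arg1,
			 unsigned long arg2, unsigned long arg3)
{
	for (unsigned int i = 0; i < RING_SIZE; i++)
		if (ring[i].status == SLOT_READY) {
			printf("host runs stored call %lu\n", ring[i].call);
			ring[i].status = SLOT_FREE;
		}
	printf("host runs call %lu(%lu,%lu,%lu)\n", call, arg1, arg2, arg3);
}

/* Mirrors async_hcall(): store the call if a slot is free; if the ring
 * is full, make a direct call, which conveniently drains the ring. */
static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3)
{
	if (ring[next_call].status != SLOT_FREE) {
		direct_hcall(call, arg1, arg2, arg3);
		return;
	}
	ring[next_call] = (struct stored_call){ call, arg1, arg2, arg3,
						SLOT_READY };
	next_call = (next_call + 1) % RING_SIZE;
}

/* Mirrors lazy_hcall(): only batch while lazy mode is on. */
static void lazy_hcall(unsigned long call, unsigned long arg1,
		       unsigned long arg2, unsigned long arg3)
{
	if (!lazy_mode)
		direct_hcall(call, arg1, arg2, arg3);
	else
		async_hcall(call, arg1, arg2, arg3);
}

int main(void)
{
	for (unsigned int i = 0; i < RING_SIZE; i++)
		ring[i].status = SLOT_FREE;

	lazy_mode = 1;				/* enter lazy MMU mode */
	for (unsigned long pte = 0; pte < 3; pte++)
		lazy_hcall(100 + pte, pte, 0, 0); /* queued, not run */
	lazy_mode = 0;
	direct_hcall(1, 0, 0, 0);		/* flush: stored calls run */
	return 0;
}

The trick the comment is proud of shows up in the full-ring path: falling back to a direct call both services the new hypercall and empties the backlog in one step.
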
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index 9c5092b6aa9f..2091779e91fb 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -54,9 +54,6 @@ hcall(unsigned long call,
 }
 /*:*/
 
-void async_hcall(unsigned long call,
-		 unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
 
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 99650353adfa..92d420fe03f8 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -149,10 +149,6 @@ union futex_key {
 		int offset;
 	} both;
 };
-int get_futex_key(u32 __user *uaddr, struct rw_semaphore *shared,
-		  union futex_key *key);
-void get_futex_key_refs(union futex_key *key);
-void drop_futex_key_refs(union futex_key *key);
 
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
diff --git a/kernel/futex.c b/kernel/futex.c
index 32710451dc20..9dc591ab681a 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -181,8 +181,8 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
  * For other futexes, it points to &current->mm->mmap_sem and
  * caller must have taken the reader lock. but NOT any spinlocks.
  */
-int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
-		  union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
+			 union futex_key *key)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
@@ -268,14 +268,13 @@ int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
 	}
 	return err;
 }
-EXPORT_SYMBOL_GPL(get_futex_key);
 
 /*
  * Take a reference to the resource addressed by a key.
  * Can be called while holding spinlocks.
  *
  */
-inline void get_futex_key_refs(union futex_key *key)
+static void get_futex_key_refs(union futex_key *key)
 {
 	if (key->both.ptr == 0)
 		return;
@@ -288,13 +287,12 @@ inline void get_futex_key_refs(union futex_key *key)
 		break;
 	}
 }
-EXPORT_SYMBOL_GPL(get_futex_key_refs);
 
 /*
  * Drop a reference to the resource addressed by a key.
  * The hash bucket spinlock must not be held.
  */
-void drop_futex_key_refs(union futex_key *key)
+static void drop_futex_key_refs(union futex_key *key)
 {
 	if (!key->both.ptr)
 		return;
@@ -307,7 +305,6 @@ void drop_futex_key_refs(union futex_key *key)
 		break;
 	}
 }
-EXPORT_SYMBOL_GPL(drop_futex_key_refs);
 
 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
 {
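
The three functions made static above share one shape: a futex key is a union whose both.ptr aliases whichever resource backs the futex, with a tag in the offset field saying which kind, and the get/drop helpers bump the matching reference count. Below is a loose plain-C model of that shape, for illustration only; struct mm, struct inode, KEY_SHARED, and the plain int counters are simplified stand-ins for the kernel's types, which take these references atomically.

#include <stdio.h>

/* Simplified stand-ins for the kernel's mm_struct and inode. */
struct mm { int users; };
struct inode { int count; };

#define KEY_SHARED 1	/* tag bit kept in the low bits of offset */

/* Modeled on union futex_key: all three views share ptr and offset. */
union key {
	struct { void *ptr; int offset; } both;
	struct { struct mm *mm; int offset; } private;
	struct { struct inode *inode; int offset; } shared;
};

/* Take a reference to the resource addressed by the key. */
static void get_key_refs(union key *key)
{
	if (!key->both.ptr)
		return;
	if (key->both.offset & KEY_SHARED)
		key->shared.inode->count++;
	else
		key->private.mm->users++;
}

/* Drop a reference to the resource addressed by the key. */
static void drop_key_refs(union key *key)
{
	if (!key->both.ptr)
		return;
	if (key->both.offset & KEY_SHARED)
		key->shared.inode->count--;
	else
		key->private.mm->users--;
}

int main(void)
{
	struct inode ino = { .count = 1 };
	union key k = { .shared = { &ino, KEY_SHARED } };

	get_key_refs(&k);
	printf("inode refs after get:  %d\n", ino.count);	/* 2 */
	drop_key_refs(&k);
	printf("inode refs after drop: %d\n", ino.count);	/* 1 */
	return 0;
}
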
diff --git a/mm/memory.c b/mm/memory.c
index eefd5b68bc42..9791e4786843 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2748,4 +2748,3 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 
 	return buf - old_buf;
 }
-EXPORT_SYMBOL_GPL(access_process_vm);