author		Jeremy Fitzhardinge <jeremy@goop.org>	2007-05-02 13:27:14 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 13:27:14 -0400
commit		f8822f42019eceed19cc6c0f985a489e17796ed8 (patch)
tree		d47728ad3a41343912e4556e6abd5cd79d75b3aa /include
parent		42c24fa22e86365055fc931d833f26165e687c19 (diff)
[PATCH] i386: PARAVIRT: Consistently wrap paravirt ops callsites to make them patchable
Wrap a set of interesting paravirt_ops calls in a wrapper which makes
the callsites available for patching. Unfortunately this is pretty
ugly, because there's no way to get gcc to generate a function call
while also wrapping just the callsite itself with the necessary labels.
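
As a rough sketch (hand-simplified for illustration, not the patch's
code; the real machinery is the _paravirt_alt/PARAVIRT_CALL/PVOP_CALL*
definitions in the diff below, which also record the clobber set), each
wrapped callsite boils down to an indirect call through the
paravirt_ops table, bracketed by local labels, with a record emitted
into a separate section so the patcher can later find the site and
rewrite it in place:

	/* Hand-simplified sketch: 'op_nr' indexes the paravirt_ops
	 * function-pointer table and must be a compile-time constant. */
	#define PVOP_SKETCH(op_nr)					\
		asm volatile("771:\n\t"					\
			     "call *(paravirt_ops+%c[nr]*4)\n"		\
			     "772:\n"					\
			     ".pushsection .parainstructions, \"a\"\n"	\
			     " .long 771b\n"	/* callsite address */	\
			     " .byte %c[nr]\n"	/* which operation */	\
			     " .byte 772b-771b\n" /* patchable length */ \
			     ".popsection"				\
			     : : [nr] "i" (op_nr) : "memory", "cc")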
This patch supports functions with 0-4 arguments, which either return
void or return a value. 64-bit arguments must be split into a pair of
32-bit arguments (lower word first). Small structures are returned in
registers.
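
Schematically (a sketch mirroring the PAE __pte()/make_pte case added
below, not new code): a 64-bit value is passed as two 32-bit words,
lower word first, and a 64-bit result comes back in the %edx:%eax pair,
which the "=A" output constraint in PVOP_CALL* captures as one
unsigned long long. The hypothetical helper name is illustrative only:

	/* Sketch only, mirroring the PAE __pte() in the diff below. */
	static inline pte_t sketch_pte(unsigned long long val)
	{
		unsigned long long ret =
			PVOP_CALL2(unsigned long long, make_pte,
				   val, val >> 32); /* low word, then high */
		return (pte_t) { ret, ret >> 32 }; /* rebuilt from %edx:%eax */
	}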
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Zachary Amsden <zach@vmware.com>
Cc: Anthony Liguori <anthony@codemonkey.ws>
Diffstat (limited to 'include')
-rw-r--r--	include/asm-i386/paravirt.h	|	686
1 file changed, 560 insertions, 126 deletions
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 87fd4317bee9..837457b42dbe 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -124,7 +124,7 @@ struct paravirt_ops

 	void (*flush_tlb_user)(void);
 	void (*flush_tlb_kernel)(void);
-	void (*flush_tlb_single)(u32 addr);
+	void (*flush_tlb_single)(unsigned long addr);

 	void (*map_pt_hook)(int type, pte_t *va, u32 pfn);

@@ -188,7 +188,7 @@ extern struct paravirt_ops paravirt_ops;
 #define paravirt_clobber(clobber)		\
 	[paravirt_clobber] "i" (clobber)

-#define PARAVIRT_CALL	"call *paravirt_ops+%c[paravirt_typenum]*4;"
+#define PARAVIRT_CALL	"call *(paravirt_ops+%c[paravirt_typenum]*4);"

 #define _paravirt_alt(insn_string, type, clobber)	\
 	"771:\n\t" insn_string "\n" "772:\n"		\
@@ -199,26 +199,234 @@ extern struct paravirt_ops paravirt_ops;
 	" .short " clobber "\n"			\
 	".popsection\n"

 #define paravirt_alt(insn_string)					\
 	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

-#define paravirt_enabled() (paravirt_ops.paravirt_enabled)
+#define PVOP_CALL0(__rettype, __op)					\
+	({								\
+		__rettype __ret;					\
+		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
+			unsigned long long __tmp;			\
+			unsigned long __ecx;				\
+			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
+				     : "=A" (__tmp), "=c" (__ecx)	\
+				     : paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		} else {						\
+			unsigned long __tmp, __edx, __ecx;		\
+			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
+				     : "=a" (__tmp), "=d" (__edx),	\
+				       "=c" (__ecx)			\
+				     : paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		}							\
+		__ret;							\
+	})
+#define PVOP_VCALL0(__op)						\
+	({								\
+		unsigned long __eax, __edx, __ecx;			\
+		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
+			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
+			     : paravirt_type(__op),			\
+			       paravirt_clobber(CLBR_ANY)		\
+			     : "memory", "cc");				\
+	})
+
+#define PVOP_CALL1(__rettype, __op, arg1)				\
+	({								\
+		__rettype __ret;					\
+		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
+			unsigned long long __tmp;			\
+			unsigned long __ecx;				\
+			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
+				     : "=A" (__tmp), "=c" (__ecx)	\
+				     : "a" ((u32)(arg1)),		\
+				       paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		} else {						\
+			unsigned long __tmp, __edx, __ecx;		\
+			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
+				     : "=a" (__tmp), "=d" (__edx),	\
+				       "=c" (__ecx)			\
+				     : "0" ((u32)(arg1)),		\
+				       paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		}							\
+		__ret;							\
+	})
+#define PVOP_VCALL1(__op, arg1)						\
+	({								\
+		unsigned long __eax, __edx, __ecx;			\
+		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
+			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
+			     : "0" ((u32)(arg1)),			\
+			       paravirt_type(__op),			\
+			       paravirt_clobber(CLBR_ANY)		\
+			     : "memory", "cc");				\
+	})
+
+#define PVOP_CALL2(__rettype, __op, arg1, arg2)				\
+	({								\
+		__rettype __ret;					\
+		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
+			unsigned long long __tmp;			\
+			unsigned long __ecx;				\
+			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
+				     : "=A" (__tmp), "=c" (__ecx)	\
+				     : "a" ((u32)(arg1)),		\
+				       "d" ((u32)(arg2)),		\
+				       paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		} else {						\
+			unsigned long __tmp, __edx, __ecx;		\
+			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
+				     : "=a" (__tmp), "=d" (__edx),	\
+				       "=c" (__ecx)			\
+				     : "0" ((u32)(arg1)),		\
+				       "1" ((u32)(arg2)),		\
+				       paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		}							\
+		__ret;							\
+	})
+#define PVOP_VCALL2(__op, arg1, arg2)					\
+	({								\
+		unsigned long __eax, __edx, __ecx;			\
+		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
+			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
+			     : "0" ((u32)(arg1)),			\
+			       "1" ((u32)(arg2)),			\
+			       paravirt_type(__op),			\
+			       paravirt_clobber(CLBR_ANY)		\
+			     : "memory", "cc");				\
+	})
+
+#define PVOP_CALL3(__rettype, __op, arg1, arg2, arg3)			\
+	({								\
+		__rettype __ret;					\
+		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
+			unsigned long long __tmp;			\
+			unsigned long __ecx;				\
+			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
+				     : "=A" (__tmp), "=c" (__ecx)	\
+				     : "a" ((u32)(arg1)),		\
+				       "d" ((u32)(arg2)),		\
+				       "1" ((u32)(arg3)),		\
+				       paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		} else {						\
+			unsigned long __tmp, __edx, __ecx;		\
+			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
+				     : "=a" (__tmp), "=d" (__edx),	\
+				       "=c" (__ecx)			\
+				     : "0" ((u32)(arg1)),		\
+				       "1" ((u32)(arg2)),		\
+				       "2" ((u32)(arg3)),		\
+				       paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		}							\
+		__ret;							\
+	})
+#define PVOP_VCALL3(__op, arg1, arg2, arg3)				\
+	({								\
+		unsigned long __eax, __edx, __ecx;			\
+		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
+			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
+			     : "0" ((u32)(arg1)),			\
+			       "1" ((u32)(arg2)),			\
+			       "2" ((u32)(arg3)),			\
+			       paravirt_type(__op),			\
+			       paravirt_clobber(CLBR_ANY)		\
+			     : "memory", "cc");				\
+	})
+
+#define PVOP_CALL4(__rettype, __op, arg1, arg2, arg3, arg4)		\
+	({								\
+		__rettype __ret;					\
+		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
+			unsigned long long __tmp;			\
+			unsigned long __ecx;				\
+			asm volatile("push %[_arg4]; "			\
+				     paravirt_alt(PARAVIRT_CALL)	\
+				     "lea 4(%%esp),%%esp"		\
+				     : "=A" (__tmp), "=c" (__ecx)	\
+				     : "a" ((u32)(arg1)),		\
+				       "d" ((u32)(arg2)),		\
+				       "1" ((u32)(arg3)),		\
+				       [_arg4] "mr" ((u32)(arg4)),	\
+				       paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		} else {						\
+			unsigned long __tmp, __edx, __ecx;		\
+			asm volatile("push %[_arg4]; "			\
+				     paravirt_alt(PARAVIRT_CALL)	\
+				     "lea 4(%%esp),%%esp"		\
+				     : "=a" (__tmp), "=d" (__edx), "=c" (__ecx) \
+				     : "0" ((u32)(arg1)),		\
+				       "1" ((u32)(arg2)),		\
+				       "2" ((u32)(arg3)),		\
+				       [_arg4]"mr" ((u32)(arg4)),	\
+				       paravirt_type(__op),		\
+				       paravirt_clobber(CLBR_ANY)	\
+				     : "memory", "cc");			\
+			__ret = (__rettype)__tmp;			\
+		}							\
+		__ret;							\
+	})
+#define PVOP_VCALL4(__op, arg1, arg2, arg3, arg4)			\
+	({								\
+		unsigned long __eax, __edx, __ecx;			\
+		asm volatile("push %[_arg4]; "				\
+			     paravirt_alt(PARAVIRT_CALL)		\
+			     "lea 4(%%esp),%%esp"			\
+			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
+			     : "0" ((u32)(arg1)),			\
+			       "1" ((u32)(arg2)),			\
+			       "2" ((u32)(arg3)),			\
+			       [_arg4]"mr" ((u32)(arg4)),		\
+			       paravirt_type(__op),			\
+			       paravirt_clobber(CLBR_ANY)		\
+			     : "memory", "cc");				\
+	})
+
+static inline int paravirt_enabled(void)
+{
+	return paravirt_ops.paravirt_enabled;
+}

 static inline void load_esp0(struct tss_struct *tss,
 			     struct thread_struct *thread)
 {
-	paravirt_ops.load_esp0(tss, thread);
+	PVOP_VCALL2(load_esp0, tss, thread);
 }

 #define ARCH_SETUP			paravirt_ops.arch_setup();
 static inline unsigned long get_wallclock(void)
 {
-	return paravirt_ops.get_wallclock();
+	return PVOP_CALL0(unsigned long, get_wallclock);
 }

 static inline int set_wallclock(unsigned long nowtime)
 {
-	return paravirt_ops.set_wallclock(nowtime);
+	return PVOP_CALL1(int, set_wallclock, nowtime);
 }

 static inline void (*choose_time_init(void))(void)
@@ -230,127 +438,208 @@ static inline void (*choose_time_init(void))(void)
 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
 			   unsigned int *ecx, unsigned int *edx)
 {
-	paravirt_ops.cpuid(eax, ebx, ecx, edx);
+	PVOP_VCALL4(cpuid, eax, ebx, ecx, edx);
 }

 /*
  * These special macros can be used to get or set a debugging register
  */
-#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
-#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)
+static inline unsigned long paravirt_get_debugreg(int reg)
+{
+	return PVOP_CALL1(unsigned long, get_debugreg, reg);
+}
+#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
+static inline void set_debugreg(unsigned long val, int reg)
+{
+	PVOP_VCALL2(set_debugreg, reg, val);
+}

-#define clts() paravirt_ops.clts()
+static inline void clts(void)
+{
+	PVOP_VCALL0(clts);
+}

-#define read_cr0() paravirt_ops.read_cr0()
-#define write_cr0(x) paravirt_ops.write_cr0(x)
+static inline unsigned long read_cr0(void)
+{
+	return PVOP_CALL0(unsigned long, read_cr0);
+}

-#define read_cr2() paravirt_ops.read_cr2()
-#define write_cr2(x) paravirt_ops.write_cr2(x)
+static inline void write_cr0(unsigned long x)
+{
+	PVOP_VCALL1(write_cr0, x);
+}
+
+static inline unsigned long read_cr2(void)
+{
+	return PVOP_CALL0(unsigned long, read_cr2);
+}
+
+static inline void write_cr2(unsigned long x)
+{
+	PVOP_VCALL1(write_cr2, x);
+}
+
+static inline unsigned long read_cr3(void)
+{
+	return PVOP_CALL0(unsigned long, read_cr3);
+}

-#define read_cr3() paravirt_ops.read_cr3()
-#define write_cr3(x) paravirt_ops.write_cr3(x)
+static inline void write_cr3(unsigned long x)
+{
+	PVOP_VCALL1(write_cr3, x);
+}

-#define read_cr4() paravirt_ops.read_cr4()
-#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
-#define write_cr4(x) paravirt_ops.write_cr4(x)
+static inline unsigned long read_cr4(void)
+{
+	return PVOP_CALL0(unsigned long, read_cr4);
+}
+static inline unsigned long read_cr4_safe(void)
+{
+	return PVOP_CALL0(unsigned long, read_cr4_safe);
+}

-#define raw_ptep_get_and_clear(xp)	(paravirt_ops.ptep_get_and_clear(xp))
+static inline void write_cr4(unsigned long x)
+{
+	PVOP_VCALL1(write_cr4, x);
+}

 static inline void raw_safe_halt(void)
 {
-	paravirt_ops.safe_halt();
+	PVOP_VCALL0(safe_halt);
 }

 static inline void halt(void)
 {
-	paravirt_ops.safe_halt();
+	PVOP_VCALL0(safe_halt);
+}
+
+static inline void wbinvd(void)
+{
+	PVOP_VCALL0(wbinvd);
 }
-#define wbinvd() paravirt_ops.wbinvd()

 #define get_kernel_rpl() (paravirt_ops.kernel_rpl)

+static inline u64 paravirt_read_msr(unsigned msr, int *err)
+{
+	return PVOP_CALL2(u64, read_msr, msr, err);
+}
+static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
+{
+	return PVOP_CALL3(int, write_msr, msr, low, high);
+}
+
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr,val1,val2) do {		\
 	int _err;				\
-	u64 _l = paravirt_ops.read_msr(msr,&_err);\
+	u64 _l = paravirt_read_msr(msr, &_err);	\
 	val1 = (u32)_l;				\
 	val2 = _l >> 32;			\
 } while(0)

 #define wrmsr(msr,val1,val2) do {		\
-	u64 _l = ((u64)(val2) << 32) | (val1);	\
-	paravirt_ops.write_msr((msr), _l);	\
+	paravirt_write_msr(msr, val1, val2);	\
 } while(0)

 #define rdmsrl(msr,val) do {			\
 	int _err;				\
-	val = paravirt_ops.read_msr((msr),&_err);\
+	val = paravirt_read_msr(msr, &_err);	\
 } while(0)

-#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val)))
-#define wrmsr_safe(msr,a,b) ({			\
-	u64 _l = ((u64)(b) << 32) | (a);	\
-	paravirt_ops.write_msr((msr),_l);	\
-})
+#define wrmsrl(msr,val)		((void)paravirt_write_msr(msr, val, 0))
+#define wrmsr_safe(msr,a,b)	paravirt_write_msr(msr, a, b)

 /* rdmsr with exception handling */
 #define rdmsr_safe(msr,a,b) ({			\
 	int _err;				\
-	u64 _l = paravirt_ops.read_msr(msr,&_err);\
+	u64 _l = paravirt_read_msr(msr, &_err);	\
 	(*a) = (u32)_l;				\
 	(*b) = _l >> 32;			\
 	_err; })

-#define rdtsc(low,high) do {			\
-	u64 _l = paravirt_ops.read_tsc();	\
-	low = (u32)_l;				\
-	high = _l >> 32;			\
+
+static inline u64 paravirt_read_tsc(void)
+{
+	return PVOP_CALL0(u64, read_tsc);
+}
+#define rdtsc(low,high) do {			\
+	u64 _l = paravirt_read_tsc();		\
+	low = (u32)_l;				\
+	high = _l >> 32;			\
 } while(0)

 #define rdtscl(low) do {			\
-	u64 _l = paravirt_ops.read_tsc();	\
+	u64 _l = paravirt_read_tsc();		\
 	low = (int)_l;				\
 } while(0)

-#define rdtscll(val) (val = paravirt_ops.read_tsc())
+#define rdtscll(val) (val = paravirt_read_tsc())

 #define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
 #define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())

 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

-#define rdpmc(counter,low,high) do {		\
-	u64 _l = paravirt_ops.read_pmc();	\
-	low = (u32)_l;				\
-	high = _l >> 32;			\
-} while(0)
+static inline unsigned long long paravirt_read_pmc(int counter)
+{
+	return PVOP_CALL1(u64, read_pmc, counter);
+}

-#define load_TR_desc() (paravirt_ops.load_tr_desc())
-#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr))
-#define load_idt(dtr) (paravirt_ops.load_idt(dtr))
-#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries)))
-#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr))
-#define store_idt(dtr) (paravirt_ops.store_idt(dtr))
-#define store_tr(tr) ((tr) = paravirt_ops.store_tr())
-#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu)))
-#define write_ldt_entry(dt, entry, low, high) \
-	(paravirt_ops.write_ldt_entry((dt), (entry), (low), (high)))
-#define write_gdt_entry(dt, entry, low, high) \
-	(paravirt_ops.write_gdt_entry((dt), (entry), (low), (high)))
-#define write_idt_entry(dt, entry, low, high) \
-	(paravirt_ops.write_idt_entry((dt), (entry), (low), (high)))
-#define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask))
-
-#define __pte(x)	paravirt_ops.make_pte(x)
-#define __pgd(x)	paravirt_ops.make_pgd(x)
-
-#define pte_val(x)	paravirt_ops.pte_val(x)
-#define pgd_val(x)	paravirt_ops.pgd_val(x)
+#define rdpmc(counter,low,high) do {		\
+	u64 _l = paravirt_read_pmc(counter);	\
+	low = (u32)_l;				\
+	high = _l >> 32;			\
+} while(0)

-#ifdef CONFIG_X86_PAE
-#define __pmd(x)	paravirt_ops.make_pmd(x)
-#define pmd_val(x)	paravirt_ops.pmd_val(x)
-#endif
+static inline void load_TR_desc(void)
+{
+	PVOP_VCALL0(load_tr_desc);
+}
+static inline void load_gdt(const struct Xgt_desc_struct *dtr)
+{
+	PVOP_VCALL1(load_gdt, dtr);
+}
+static inline void load_idt(const struct Xgt_desc_struct *dtr)
+{
+	PVOP_VCALL1(load_idt, dtr);
+}
+static inline void set_ldt(const void *addr, unsigned entries)
+{
+	PVOP_VCALL2(set_ldt, addr, entries);
+}
+static inline void store_gdt(struct Xgt_desc_struct *dtr)
+{
+	PVOP_VCALL1(store_gdt, dtr);
+}
+static inline void store_idt(struct Xgt_desc_struct *dtr)
+{
+	PVOP_VCALL1(store_idt, dtr);
+}
+static inline unsigned long paravirt_store_tr(void)
+{
+	return PVOP_CALL0(unsigned long, store_tr);
+}
+#define store_tr(tr)	((tr) = paravirt_store_tr())
+static inline void load_TLS(struct thread_struct *t, unsigned cpu)
+{
+	PVOP_VCALL2(load_tls, t, cpu);
+}
+static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
+{
+	PVOP_VCALL4(write_ldt_entry, dt, entry, low, high);
+}
+static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
+{
+	PVOP_VCALL4(write_gdt_entry, dt, entry, low, high);
+}
+static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
+{
+	PVOP_VCALL4(write_idt_entry, dt, entry, low, high);
+}
+static inline void set_iopl_mask(unsigned mask)
+{
+	PVOP_VCALL1(set_iopl_mask, mask);
+}

 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void) {
@@ -368,27 +657,27 @@ static inline void slow_down_io(void) {
  */
 static inline void apic_write(unsigned long reg, unsigned long v)
 {
-	paravirt_ops.apic_write(reg,v);
+	PVOP_VCALL2(apic_write, reg, v);
 }

 static inline void apic_write_atomic(unsigned long reg, unsigned long v)
 {
-	paravirt_ops.apic_write_atomic(reg,v);
+	PVOP_VCALL2(apic_write_atomic, reg, v);
 }

 static inline unsigned long apic_read(unsigned long reg)
 {
-	return paravirt_ops.apic_read(reg);
+	return PVOP_CALL1(unsigned long, apic_read, reg);
 }

 static inline void setup_boot_clock(void)
 {
-	paravirt_ops.setup_boot_clock();
+	PVOP_VCALL0(setup_boot_clock);
 }

 static inline void setup_secondary_clock(void)
 {
-	paravirt_ops.setup_secondary_clock();
+	PVOP_VCALL0(setup_secondary_clock);
 }
 #endif

@@ -408,93 +697,205 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
 static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 				    unsigned long start_esp)
 {
-	return paravirt_ops.startup_ipi_hook(phys_apicid, start_eip, start_esp);
+	PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp);
 }
 #endif

 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
 {
-	paravirt_ops.activate_mm(prev, next);
+	PVOP_VCALL2(activate_mm, prev, next);
 }

 static inline void arch_dup_mmap(struct mm_struct *oldmm,
 				 struct mm_struct *mm)
 {
-	paravirt_ops.dup_mmap(oldmm, mm);
+	PVOP_VCALL2(dup_mmap, oldmm, mm);
 }

 static inline void arch_exit_mmap(struct mm_struct *mm)
 {
-	paravirt_ops.exit_mmap(mm);
+	PVOP_VCALL1(exit_mmap, mm);
 }

-#define __flush_tlb() paravirt_ops.flush_tlb_user()
-#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
-#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
+static inline void __flush_tlb(void)
+{
+	PVOP_VCALL0(flush_tlb_user);
+}
+static inline void __flush_tlb_global(void)
+{
+	PVOP_VCALL0(flush_tlb_kernel);
+}
+static inline void __flush_tlb_single(unsigned long addr)
+{
+	PVOP_VCALL1(flush_tlb_single, addr);
+}

-#define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn)
+static inline void paravirt_map_pt_hook(int type, pte_t *va, u32 pfn)
+{
+	PVOP_VCALL3(map_pt_hook, type, va, pfn);
+}

-#define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
-#define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)
+static inline void paravirt_alloc_pt(unsigned pfn)
+{
+	PVOP_VCALL1(alloc_pt, pfn);
+}
+static inline void paravirt_release_pt(unsigned pfn)
+{
+	PVOP_VCALL1(release_pt, pfn);
+}

-#define paravirt_alloc_pd(pfn) paravirt_ops.alloc_pd(pfn)
-#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) \
-	paravirt_ops.alloc_pd_clone(pfn, clonepfn, start, count)
-#define paravirt_release_pd(pfn) paravirt_ops.release_pd(pfn)
+static inline void paravirt_alloc_pd(unsigned pfn)
+{
+	PVOP_VCALL1(alloc_pd, pfn);
+}

-static inline void set_pte(pte_t *ptep, pte_t pteval)
+static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
+					   unsigned start, unsigned count)
+{
+	PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count);
+}
+static inline void paravirt_release_pd(unsigned pfn)
 {
-	paravirt_ops.set_pte(ptep, pteval);
+	PVOP_VCALL1(release_pd, pfn);
 }

-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pteval)
+static inline void pte_update(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep)
 {
-	paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
+	PVOP_VCALL3(pte_update, mm, addr, ptep);
 }

-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep)
 {
-	paravirt_ops.set_pmd(pmdp, pmdval);
+	PVOP_VCALL3(pte_update_defer, mm, addr, ptep);
 }

-static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep)
+#ifdef CONFIG_X86_PAE
+static inline pte_t __pte(unsigned long long val)
 {
-	paravirt_ops.pte_update(mm, addr, ptep);
+	unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte,
+					    val, val >> 32);
+	return (pte_t) { ret, ret >> 32 };
 }

-static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
+static inline pmd_t __pmd(unsigned long long val)
 {
-	paravirt_ops.pte_update_defer(mm, addr, ptep);
+	return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) };
+}
+
+static inline pgd_t __pgd(unsigned long long val)
+{
+	return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) };
+}
+
+static inline unsigned long long pte_val(pte_t x)
+{
+	return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high);
+}
+
+static inline unsigned long long pmd_val(pmd_t x)
+{
+	return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32);
+}
+
+static inline unsigned long long pgd_val(pgd_t x)
+{
+	return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32);
+}
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	/* 5 arg words */
+	paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
 }

-#ifdef CONFIG_X86_PAE
 static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
 {
-	paravirt_ops.set_pte_atomic(ptep, pteval);
+	PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high);
 }

-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
 {
+	/* 5 arg words */
 	paravirt_ops.set_pte_present(mm, addr, ptep, pte);
 }

+static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32);
+}
+
 static inline void set_pud(pud_t *pudp, pud_t pudval)
 {
-	paravirt_ops.set_pud(pudp, pudval);
+	PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32);
 }

 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	paravirt_ops.pte_clear(mm, addr, ptep);
+	PVOP_VCALL3(pte_clear, mm, addr, ptep);
 }

 static inline void pmd_clear(pmd_t *pmdp)
 {
-	paravirt_ops.pmd_clear(pmdp);
+	PVOP_VCALL1(pmd_clear, pmdp);
+}
+
+static inline pte_t raw_ptep_get_and_clear(pte_t *p)
+{
+	unsigned long long val = PVOP_CALL1(unsigned long long, ptep_get_and_clear, p);
+	return (pte_t) { val, val >> 32 };
+}
+#else /* !CONFIG_X86_PAE */
+static inline pte_t __pte(unsigned long val)
+{
+	return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) };
 }
-#endif
+
+static inline pgd_t __pgd(unsigned long val)
+{
+	return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) };
+}
+
+static inline unsigned long pte_val(pte_t x)
+{
+	return PVOP_CALL1(unsigned long, pte_val, x.pte_low);
+}
+
+static inline unsigned long pgd_val(pgd_t x)
+{
+	return PVOP_CALL1(unsigned long, pgd_val, x.pgd);
+}
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	PVOP_VCALL2(set_pte, ptep, pteval.pte_low);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd);
+}
+
+static inline pte_t raw_ptep_get_and_clear(pte_t *p)
+{
+	return (pte_t) { PVOP_CALL1(unsigned long, ptep_get_and_clear, p) };
+}
+#endif /* CONFIG_X86_PAE */

 /* Lazy mode for batching updates / context switch */
 #define PARAVIRT_LAZY_NONE 0
@@ -503,14 +904,37 @@ static inline void pmd_clear(pmd_t *pmdp)
 #define PARAVIRT_LAZY_FLUSH 3

 #define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
-#define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
-#define arch_flush_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_FLUSH)
+static inline void arch_enter_lazy_cpu_mode(void)
+{
+	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU);
+}
+
+static inline void arch_leave_lazy_cpu_mode(void)
+{
+	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
+}
+
+static inline void arch_flush_lazy_cpu_mode(void)
+{
+	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
+}
+

 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
-#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
-#define arch_flush_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_FLUSH)
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU);
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
+}
+
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+	PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
+}

 void _paravirt_nop(void);
 #define paravirt_nop	((void *)_paravirt_nop)
@@ -603,6 +1027,16 @@ static inline unsigned long __raw_local_irq_save(void)
 		     paravirt_clobber(CLBR_EAX)

 #undef PARAVIRT_CALL
+#undef PVOP_VCALL0
+#undef PVOP_CALL0
+#undef PVOP_VCALL1
+#undef PVOP_CALL1
+#undef PVOP_VCALL2
+#undef PVOP_CALL2
+#undef PVOP_VCALL3
+#undef PVOP_CALL3
+#undef PVOP_VCALL4
+#undef PVOP_CALL4

 #else /* __ASSEMBLY__ */
