author		Jeremy Fitzhardinge <jeremy@goop.org>	2007-05-02 13:27:15 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 13:27:15 -0400
commit		1a45b7aaa5051489b46afbc48509bd91f8b4a1ba (patch)
tree		2e0f39a0f801b33a9fbccca2afe1beb9cfaa82be /include
parent		4e0fa85602a4fa219fc3a9c053d5140bf987d3e3 (diff)
[PATCH] i386: PARAVIRT: Clean up paravirt patchable wrappers
Replace all the open-coded macros for generating calls with a pair of
more general macros (__PVOP_CALL/VCALL), and redefine all the
PVOP_V?CALL[0-4] in terms of them.

[ Andrew, Andi: this should slot in immediately after "Document
  asm-i386/paravirt.h" (paravirt_ops-document-asm-i386-paravirth.patch) ]

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
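To make the shape of the cleanup concrete, here is a minimal standalone sketch of the same consolidation pattern; the DEMO_*/demo_* names are invented for illustration only, and the real __PVOP_CALL/__PVOP_VCALL in the diff below additionally emit a patchable call through paravirt_ops with explicit eax/edx/ecx register constraints (and, like this sketch's zero-argument case, rely on the GNU ##__VA_ARGS__ extension):

/* Hypothetical standalone demo of "one general variadic macro plus thin
 * per-arity wrappers"; compile with gcc, prints "42 5". */
#include <stdio.h>

#define __DEMO_CALL(op, ...)	demo_##op(__VA_ARGS__)	/* the one general macro */
#define DEMO_CALL0(op)		__DEMO_CALL(op)
#define DEMO_CALL1(op, a1)	__DEMO_CALL(op, a1)
#define DEMO_CALL2(op, a1, a2)	__DEMO_CALL(op, a1, a2)

static int demo_answer(void)      { return 42; }
static int demo_add(int a, int b) { return a + b; }

int main(void)
{
	printf("%d %d\n", DEMO_CALL0(answer), DEMO_CALL2(add, 2, 3));
	return 0;
}

In the real macros the extra pre/post string parameters are only non-empty for the four-argument case, which pushes the fourth argument on the stack around the call and pops it again afterwards.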
Diffstat (limited to 'include')
-rw-r--r--	include/asm-i386/paravirt.h	248
1 file changed, 60 insertions(+), 188 deletions(-)
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index c5451923c79e..2ba18963e114 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -332,211 +332,81 @@ unsigned paravirt_patch_insns(void *site, unsigned len,
  * means that all uses must be wrapped in inline functions.  This also
  * makes sure the incoming and outgoing types are always correct.
  */
-#define PVOP_CALL0(__rettype, __op)					\
-	({								\
-		__rettype __ret;					\
-		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
-			unsigned long long __tmp;			\
-			unsigned long __ecx;				\
-			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
-				     : "=A" (__tmp), "=c" (__ecx)	\
-				     : paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc");			\
-			__ret = (__rettype)__tmp;			\
-		} else {						\
-			unsigned long __tmp, __edx, __ecx;		\
-			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
-				     : "=a" (__tmp), "=d" (__edx),	\
-				       "=c" (__ecx)			\
-				     : paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc");			\
-			__ret = (__rettype)__tmp;			\
-		}							\
-		__ret;							\
-	})
-#define PVOP_VCALL0(__op)						\
-	({								\
-		unsigned long __eax, __edx, __ecx;			\
-		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
-			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
-			     : paravirt_type(__op),			\
-			       paravirt_clobber(CLBR_ANY)		\
-			     : "memory", "cc");				\
-	})
-
-#define PVOP_CALL1(__rettype, __op, arg1)				\
-	({								\
-		__rettype __ret;					\
-		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
-			unsigned long long __tmp;			\
-			unsigned long __ecx;				\
-			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
-				     : "=A" (__tmp), "=c" (__ecx)	\
-				     : "a" ((u32)(arg1)),		\
-				       paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc");			\
-			__ret = (__rettype)__tmp;			\
-		} else {						\
-			unsigned long __tmp, __edx, __ecx;		\
-			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
-				     : "=a" (__tmp), "=d" (__edx),	\
-				       "=c" (__ecx)			\
-				     : "0" ((u32)(arg1)),		\
-				       paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc");			\
-			__ret = (__rettype)__tmp;			\
-		}							\
-		__ret;							\
-	})
-#define PVOP_VCALL1(__op, arg1)						\
-	({								\
-		unsigned long __eax, __edx, __ecx;			\
-		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
-			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
-			     : "0" ((u32)(arg1)),			\
-			       paravirt_type(__op),			\
-			       paravirt_clobber(CLBR_ANY)		\
-			     : "memory", "cc");				\
-	})
-
-#define PVOP_CALL2(__rettype, __op, arg1, arg2)				\
-	({								\
-		__rettype __ret;					\
-		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
-			unsigned long long __tmp;			\
-			unsigned long __ecx;				\
-			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
-				     : "=A" (__tmp), "=c" (__ecx)	\
-				     : "a" ((u32)(arg1)),		\
-				       "d" ((u32)(arg2)),		\
-				       paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc");			\
-			__ret = (__rettype)__tmp;			\
-		} else {						\
-			unsigned long __tmp, __edx, __ecx;		\
-			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
-				     : "=a" (__tmp), "=d" (__edx),	\
-				       "=c" (__ecx)			\
-				     : "0" ((u32)(arg1)),		\
-				       "1" ((u32)(arg2)),		\
-				       paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc");			\
-			__ret = (__rettype)__tmp;			\
-		}							\
-		__ret;							\
-	})
-#define PVOP_VCALL2(__op, arg1, arg2)					\
-	({								\
-		unsigned long __eax, __edx, __ecx;			\
-		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
-			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
-			     : "0" ((u32)(arg1)),			\
-			       "1" ((u32)(arg2)),			\
-			       paravirt_type(__op),			\
-			       paravirt_clobber(CLBR_ANY)		\
-			     : "memory", "cc");				\
-	})
-
-#define PVOP_CALL3(__rettype, __op, arg1, arg2, arg3)			\
-	({								\
-		__rettype __ret;					\
-		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
-			unsigned long long __tmp;			\
-			unsigned long __ecx;				\
-			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
-				     : "=A" (__tmp), "=c" (__ecx)	\
-				     : "a" ((u32)(arg1)),		\
-				       "d" ((u32)(arg2)),		\
-				       "1" ((u32)(arg3)),		\
-				       paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc");			\
-			__ret = (__rettype)__tmp;			\
-		} else {						\
-			unsigned long __tmp, __edx, __ecx;		\
-			asm volatile(paravirt_alt(PARAVIRT_CALL)	\
-				     : "=a" (__tmp), "=d" (__edx),	\
-				       "=c" (__ecx)			\
-				     : "0" ((u32)(arg1)),		\
-				       "1" ((u32)(arg2)),		\
-				       "2" ((u32)(arg3)),		\
-				       paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc");			\
-			__ret = (__rettype)__tmp;			\
-		}							\
-		__ret;							\
-	})
-#define PVOP_VCALL3(__op, arg1, arg2, arg3)				\
-	({								\
-		unsigned long __eax, __edx, __ecx;			\
-		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
-			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
-			     : "0" ((u32)(arg1)),			\
-			       "1" ((u32)(arg2)),			\
-			       "2" ((u32)(arg3)),			\
-			       paravirt_type(__op),			\
-			       paravirt_clobber(CLBR_ANY)		\
-			     : "memory", "cc");				\
-	})
-
-#define PVOP_CALL4(__rettype, __op, arg1, arg2, arg3, arg4)		\
-	({								\
-		__rettype __ret;					\
-		if (sizeof(__rettype) > sizeof(unsigned long)) {	\
-			unsigned long long __tmp;			\
-			unsigned long __ecx;				\
-			asm volatile("push %[_arg4]; "			\
-				     paravirt_alt(PARAVIRT_CALL)	\
-				     "lea 4(%%esp),%%esp"		\
-				     : "=A" (__tmp), "=c" (__ecx)	\
-				     : "a" ((u32)(arg1)),		\
-				       "d" ((u32)(arg2)),		\
-				       "1" ((u32)(arg3)),		\
-				       [_arg4] "mr" ((u32)(arg4)),	\
-				       paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc",);		\
-			__ret = (__rettype)__tmp;			\
-		} else {						\
-			unsigned long __tmp, __edx, __ecx;		\
-			asm volatile("push %[_arg4]; "			\
-				     paravirt_alt(PARAVIRT_CALL)	\
-				     "lea 4(%%esp),%%esp"		\
-				     : "=a" (__tmp), "=d" (__edx), "=c" (__ecx) \
-				     : "0" ((u32)(arg1)),		\
-				       "1" ((u32)(arg2)),		\
-				       "2" ((u32)(arg3)),		\
-				       [_arg4]"mr" ((u32)(arg4)),	\
-				       paravirt_type(__op),		\
-				       paravirt_clobber(CLBR_ANY)	\
-				     : "memory", "cc");			\
-			__ret = (__rettype)__tmp;			\
-		}							\
-		__ret;							\
-	})
-#define PVOP_VCALL4(__op, arg1, arg2, arg3, arg4)			\
-	({								\
-		unsigned long __eax, __edx, __ecx;			\
-		asm volatile("push %[_arg4]; "				\
-			     paravirt_alt(PARAVIRT_CALL)		\
-			     "lea 4(%%esp),%%esp"			\
-			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
-			     : "0" ((u32)(arg1)),			\
-			       "1" ((u32)(arg2)),			\
-			       "2" ((u32)(arg3)),			\
-			       [_arg4]"mr" ((u32)(arg4)),		\
-			       paravirt_type(__op),			\
-			       paravirt_clobber(CLBR_ANY)		\
-			     : "memory", "cc");				\
-	})
-
+#define __PVOP_CALL(rettype, op, pre, post, ...)			\
+	({								\
+		rettype __ret;						\
+		unsigned long __eax, __edx, __ecx;			\
+		if (sizeof(rettype) > sizeof(unsigned long)) {		\
+			asm volatile(pre				\
+				     paravirt_alt(PARAVIRT_CALL)	\
+				     post				\
+				     : "=a" (__eax), "=d" (__edx),	\
+				       "=c" (__ecx)			\
+				     : paravirt_type(op),		\
+				       paravirt_clobber(CLBR_ANY),	\
+				       ##__VA_ARGS__			\
+				     : "memory", "cc");			\
+			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
+		} else {						\
+			asm volatile(pre				\
+				     paravirt_alt(PARAVIRT_CALL)	\
+				     post				\
+				     : "=a" (__eax), "=d" (__edx),	\
+				       "=c" (__ecx)			\
+				     : paravirt_type(op),		\
+				       paravirt_clobber(CLBR_ANY),	\
+				       ##__VA_ARGS__			\
+				     : "memory", "cc");			\
+			__ret = (rettype)__eax;				\
+		}							\
+		__ret;							\
+	})
+#define __PVOP_VCALL(op, pre, post, ...)				\
+	({								\
+		unsigned long __eax, __edx, __ecx;			\
+		asm volatile(pre					\
+			     paravirt_alt(PARAVIRT_CALL)		\
+			     post					\
+			     : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
+			     : paravirt_type(op),			\
+			       paravirt_clobber(CLBR_ANY),		\
+			       ##__VA_ARGS__				\
+			     : "memory", "cc");				\
+	})
+
+#define PVOP_CALL0(rettype, op)						\
+	__PVOP_CALL(rettype, op, "", "")
+#define PVOP_VCALL0(op)							\
+	__PVOP_VCALL(op, "", "")
+
+#define PVOP_CALL1(rettype, op, arg1)					\
+	__PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)))
+#define PVOP_VCALL1(op, arg1)						\
+	__PVOP_VCALL(op, "", "", "0" ((u32)(arg1)))
+
+#define PVOP_CALL2(rettype, op, arg1, arg2)				\
+	__PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))
+#define PVOP_VCALL2(op, arg1, arg2)					\
+	__PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2)))
+
+#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
+	__PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)),		\
+		    "1"((u32)(arg2)), "2"((u32)(arg3)))
+#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
+	__PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1"((u32)(arg2)),	\
+		    "2"((u32)(arg3)))
+
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
+	__PVOP_CALL(rettype, op,					\
+		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
+		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
+		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
+	__PVOP_VCALL(op,						\
+		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
+		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
+		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+
 static inline int paravirt_enabled(void)
 {
 	return paravirt_ops.paravirt_enabled;
@@ -1162,6 +1032,8 @@ static inline unsigned long __raw_local_irq_save(void)
 
 /* Make sure as little as possible of this mess escapes. */
 #undef PARAVIRT_CALL
+#undef __PVOP_CALL
++#undef __PVOP_VCALL
 #undef PVOP_VCALL0
 #undef PVOP_CALL0
 #undef PVOP_VCALL1
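For orientation, a hedged sketch of how the resulting wrappers are consumed by the inline accessors elsewhere in asm-i386/paravirt.h; the functions below are outside the hunks shown above, so treat the exact bodies as illustrative rather than quoted from the file:

/* Illustrative callers (not part of the hunks above): PVOP_CALL0 yields a
 * return value (in %eax, or %edx:%eax for 64-bit types), while PVOP_VCALL1
 * is the void variant with its one argument tied to %eax via the "0"
 * matching constraint. */
static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, read_cr2);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(write_cr3, x);
}

Because the per-arity macros keep their old names and argument order, callers like these need no changes; the patch only rewrites the macro definitions themselves.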