-rw-r--r--	arch/x86/include/asm/paravirt.h	126
-rw-r--r--	arch/x86/kernel/paravirt.c	8
-rw-r--r--	arch/x86/kernel/vsmp_64.c	12
-rw-r--r--	arch/x86/lguest/boot.c	13
-rw-r--r--	arch/x86/xen/enlighten.c	8
-rw-r--r--	arch/x86/xen/irq.c	14
6 files changed, 132 insertions, 49 deletions
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index dcce961262bf..f9107b88631b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -17,6 +17,10 @@
 #ifdef CONFIG_X86_32
 /* CLBR_ANY should match all regs platform has. For i386, that's just it */
 #define CLBR_ANY ((1 << 4) - 1)
+
+#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
+#define CLBR_RET_REG (CLBR_EAX)
+#define CLBR_SCRATCH (0)
 #else
 #define CLBR_RAX CLBR_EAX
 #define CLBR_RCX CLBR_ECX
@@ -27,16 +31,19 @@
 #define CLBR_R9 (1 << 6)
 #define CLBR_R10 (1 << 7)
 #define CLBR_R11 (1 << 8)
+
 #define CLBR_ANY ((1 << 9) - 1)
 
 #define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
 		       CLBR_RCX | CLBR_R8 | CLBR_R9)
-#define CLBR_RET_REG (CLBR_RAX | CLBR_RDX)
+#define CLBR_RET_REG (CLBR_RAX)
 #define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
 
 #include <asm/desc_defs.h>
 #endif /* X86_64 */
 
+#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #include <linux/cpumask.h>
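
As an aside, the new CLBR_CALLEE_SAVE mask is plain bit arithmetic over the clobber sets above. A minimal standalone sketch, not part of the patch: the bit positions for the low registers are not shown in this excerpt and are assumed here to follow the existing CLBR_EAX..CLBR_EDI layout.

#include <stdio.h>

/* Assumed bit layout for the registers this hunk does not show. */
#define CLBR_RAX (1 << 0)
#define CLBR_RCX (1 << 1)
#define CLBR_RDX (1 << 2)
#define CLBR_RDI (1 << 3)
#define CLBR_RSI (1 << 4)
#define CLBR_R8  (1 << 5)
#define CLBR_R9  (1 << 6)
#define CLBR_R10 (1 << 7)
#define CLBR_R11 (1 << 8)

#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG  (CLBR_RAX)
#define CLBR_SCRATCH  (CLBR_R10 | CLBR_R11)
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

int main(void)
{
	/* All argument and scratch registers, with the return register
	 * (rax) excluded: rdi, rsi, rdx, rcx, r8-r11. */
	printf("CLBR_CALLEE_SAVE = %#x\n", CLBR_CALLEE_SAVE);
	return 0;
}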
@@ -50,6 +57,14 @@ struct tss_struct;
 struct mm_struct;
 struct desc_struct;
 
+/*
+ * Wrapper type for pointers to code which uses the non-standard
+ * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
+ */
+struct paravirt_callee_save {
+	void *func;
+};
+
 /* general info */
 struct pv_info {
 	unsigned int kernel_rpl;
@@ -199,11 +214,15 @@ struct pv_irq_ops {
 	 * expected to use X86_EFLAGS_IF; all other bits
 	 * returned from save_fl are undefined, and may be ignored by
 	 * restore_fl.
+	 *
+	 * NOTE: the callers of these functions expect the callee to
+	 * preserve more registers than the standard C calling convention.
 	 */
-	unsigned long (*save_fl)(void);
-	void (*restore_fl)(unsigned long);
-	void (*irq_disable)(void);
-	void (*irq_enable)(void);
+	struct paravirt_callee_save save_fl;
+	struct paravirt_callee_save restore_fl;
+	struct paravirt_callee_save irq_disable;
+	struct paravirt_callee_save irq_enable;
+
 	void (*safe_halt)(void);
 	void (*halt)(void);
 
@@ -1437,12 +1456,37 @@ extern struct paravirt_patch_site __parainstructions[],
 	__parainstructions_end[];
 
 #ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
-#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
+#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
+#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
+
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS		PV_SAVE_REGS
+#define PV_RESTORE_ALL_CALLER_REGS	PV_RESTORE_REGS
+
 #define PV_FLAGS_ARG "0"
 #define PV_EXTRA_CLOBBERS
 #define PV_VEXTRA_CLOBBERS
 #else
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS		\
+	"push %rcx;"			\
+	"push %rdx;"			\
+	"push %rsi;"			\
+	"push %rdi;"			\
+	"push %r8;"			\
+	"push %r9;"			\
+	"push %r10;"			\
+	"push %r11;"
+#define PV_RESTORE_ALL_CALLER_REGS	\
+	"pop %r11;"			\
+	"pop %r10;"			\
+	"pop %r9;"			\
+	"pop %r8;"			\
+	"pop %rdi;"			\
+	"pop %rsi;"			\
+	"pop %rdx;"			\
+	"pop %rcx;"
+
 /* We save some registers, but all of them, that's too much. We clobber all
  * caller saved registers but the argument parameter */
 #define PV_SAVE_REGS "pushq %%rdi;"
@@ -1452,52 +1496,76 @@ extern struct paravirt_patch_site __parainstructions[],
 #define PV_FLAGS_ARG "D"
 #endif
 
+/*
+ * Generate a thunk around a function which saves all caller-save
+ * registers except for the return value. This allows C functions to
+ * be called from assembler code where fewer than normal registers are
+ * available. It may also help code generation around calls from C
+ * code if the common case doesn't use many registers.
+ *
+ * When a callee is wrapped in a thunk, the caller can assume that all
+ * arg regs and all scratch registers are preserved across the
+ * call. The return value in rax/eax will not be saved, even for void
+ * functions.
+ */
+#define PV_CALLEE_SAVE_REGS_THUNK(func)				\
+	extern typeof(func) __raw_callee_save_##func;		\
+	static void *__##func##__ __used = func;		\
+								\
+	asm(".pushsection .text;"				\
+	    "__raw_callee_save_" #func ": "			\
+	    PV_SAVE_ALL_CALLER_REGS				\
+	    "call " #func ";"					\
+	    PV_RESTORE_ALL_CALLER_REGS				\
+	    "ret;"						\
+	    ".popsection")
+
+/* Get a reference to a callee-save function */
+#define PV_CALLEE_SAVE(func)					\
+	((struct paravirt_callee_save) { __raw_callee_save_##func })
+
+/* Promise that "func" already uses the right calling convention */
+#define __PV_IS_CALLEE_SAVE(func)				\
+	((struct paravirt_callee_save) { func })
+
 static inline unsigned long __raw_local_save_flags(void)
 {
 	unsigned long f;
 
-	asm volatile(paravirt_alt(PV_SAVE_REGS
-				  PARAVIRT_CALL
-				  PV_RESTORE_REGS)
+	asm volatile(paravirt_alt(PARAVIRT_CALL)
 		     : "=a"(f)
 		     : paravirt_type(pv_irq_ops.save_fl),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc" PV_VEXTRA_CLOBBERS);
+		     : "memory", "cc");
 	return f;
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-	asm volatile(paravirt_alt(PV_SAVE_REGS
-				  PARAVIRT_CALL
-				  PV_RESTORE_REGS)
+	asm volatile(paravirt_alt(PARAVIRT_CALL)
 		     : "=a"(f)
 		     : PV_FLAGS_ARG(f),
 		       paravirt_type(pv_irq_ops.restore_fl),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc" PV_EXTRA_CLOBBERS);
+		     : "memory", "cc");
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	asm volatile(paravirt_alt(PV_SAVE_REGS
-				  PARAVIRT_CALL
-				  PV_RESTORE_REGS)
+	asm volatile(paravirt_alt(PARAVIRT_CALL)
 		     :
 		     : paravirt_type(pv_irq_ops.irq_disable),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+		     : "memory", "eax", "cc");
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	asm volatile(paravirt_alt(PV_SAVE_REGS
-				  PARAVIRT_CALL
-				  PV_RESTORE_REGS)
+	asm volatile(paravirt_alt(PARAVIRT_CALL)
 		     :
 		     : paravirt_type(pv_irq_ops.irq_enable),
 		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+		     : "memory", "eax", "cc");
 }
 
 static inline unsigned long __raw_local_irq_save(void)
@@ -1541,9 +1609,9 @@ static inline unsigned long __raw_local_irq_save(void)
 
 
 #define COND_PUSH(set, mask, reg)	\
-	.if ((~set) & mask); push %reg; .endif
+	.if ((~(set)) & mask); push %reg; .endif
 #define COND_POP(set, mask, reg)	\
-	.if ((~set) & mask); pop %reg; .endif
+	.if ((~(set)) & mask); pop %reg; .endif
 
 #ifdef CONFIG_X86_64
 
@@ -1594,15 +1662,15 @@ static inline unsigned long __raw_local_irq_save(void)
 
 #define DISABLE_INTERRUPTS(clobbers)					\
 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
-		  PV_SAVE_REGS(clobbers);				\
+		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
 		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
-		  PV_RESTORE_REGS(clobbers);)
+		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define ENABLE_INTERRUPTS(clobbers)					\
 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
-		  PV_SAVE_REGS(clobbers);				\
+		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
 		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
-		  PV_RESTORE_REGS(clobbers);)
+		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define USERGS_SYSRET32							\
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
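
To see how the pieces above fit together, here is a minimal sketch of a hypothetical paravirt backend using the new convention. The myhv_* names and the shared_flags variable are invented for illustration; everything else comes from the macros defined in this header.

static unsigned long shared_flags;	/* stand-in for hypervisor-shared state */

static unsigned long myhv_save_fl(void)
{
	return shared_flags & X86_EFLAGS_IF;
}
/* Emits __raw_callee_save_myhv_save_fl: push all caller-save regs,
 * call myhv_save_fl, pop them, ret -- so patched call sites only
 * need to treat CLBR_RET_REG as clobbered. */
PV_CALLEE_SAVE_REGS_THUNK(myhv_save_fl);

static void __init myhv_setup_irq_ops(void)
{
	/* An ordinary C function goes through the generated thunk. */
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(myhv_save_fl);
}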
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index dd25e2b1593b..8adb6b5aa421 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -310,10 +310,10 @@ struct pv_time_ops pv_time_ops = {
 
 struct pv_irq_ops pv_irq_ops = {
 	.init_IRQ = native_init_IRQ,
-	.save_fl = native_save_fl,
-	.restore_fl = native_restore_fl,
-	.irq_disable = native_irq_disable,
-	.irq_enable = native_irq_enable,
+	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+	.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
 	.safe_halt = native_safe_halt,
 	.halt = native_halt,
 #ifdef CONFIG_X86_64
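
Note that the native helpers are assigned with __PV_IS_CALLEE_SAVE() rather than PV_CALLEE_SAVE(): the reading of this hunk is that native_save_fl() and friends are already simple enough to preserve the extra registers, so no thunk is generated for them. A short contrasting sketch, where example_restore_fl is a made-up function:

/* A plain C implementation needs the thunk... */
static void example_restore_fl(unsigned long flags)
{
	native_restore_fl(flags);
}
PV_CALLEE_SAVE_REGS_THUNK(example_restore_fl);

static void __init example_setup(void)
{
	/* ...and is registered through it, */
	pv_irq_ops.restore_fl = PV_CALLEE_SAVE(example_restore_fl);

	/* whereas a function that already obeys the convention is
	 * stored directly, with no thunk in between. */
	pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl);
}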
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index a688f3bfaec2..c609205df594 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void)
 	flags &= ~X86_EFLAGS_IF;
 	return flags;
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
 
 static void vsmp_restore_fl(unsigned long flags)
 {
@@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags)
 	flags |= X86_EFLAGS_AC;
 	native_restore_fl(flags);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
 static void vsmp_irq_disable(void)
 {
@@ -53,6 +55,7 @@ static void vsmp_irq_disable(void)
 
 	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
 static void vsmp_irq_enable(void)
 {
@@ -60,6 +63,7 @@ static void vsmp_irq_enable(void)
 
 	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
 
 static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 					    unsigned long addr, unsigned len)
@@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void)
 			cap, ctl);
 	if (cap & ctl & (1 << 4)) {
 		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
-		pv_irq_ops.irq_disable = vsmp_irq_disable;
-		pv_irq_ops.irq_enable = vsmp_irq_enable;
-		pv_irq_ops.save_fl = vsmp_save_fl;
-		pv_irq_ops.restore_fl = vsmp_restore_fl;
+		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
+		pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
+		pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
+		pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
 		pv_init_ops.patch = vsmp_patch;
 
 		ctl &= ~(1 << 4);
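
For reference, the PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl) line above expands, mechanically per the x86-64 macros in paravirt.h earlier in this patch and with whitespace simplified, to roughly the following:

extern typeof(vsmp_save_fl) __raw_callee_save_vsmp_save_fl;
static void *__vsmp_save_fl__ __used = vsmp_save_fl;

asm(".pushsection .text;"
    "__raw_callee_save_vsmp_save_fl: "
    "push %rcx;" "push %rdx;" "push %rsi;" "push %rdi;"
    "push %r8;"  "push %r9;"  "push %r10;" "push %r11;"
    "call vsmp_save_fl;"
    "pop %r11;"  "pop %r10;"  "pop %r9;"  "pop %r8;"
    "pop %rdi;"  "pop %rsi;"  "pop %rdx;" "pop %rcx;"
    "ret;"
    ".popsection");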
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 92f1c6f3e19d..19e33b6cd593 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -173,24 +173,29 @@ static unsigned long save_fl(void)
 {
 	return lguest_data.irq_enabled;
 }
+PV_CALLEE_SAVE_REGS_THUNK(save_fl);
 
 /* restore_flags() just sets the flags back to the value given. */
 static void restore_fl(unsigned long flags)
 {
 	lguest_data.irq_enabled = flags;
 }
+PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
 
 /* Interrupts go off... */
 static void irq_disable(void)
 {
 	lguest_data.irq_enabled = 0;
 }
+PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
 
 /* Interrupts go on... */
 static void irq_enable(void)
 {
 	lguest_data.irq_enabled = X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
+
 /*:*/
 /*M:003 Note that we don't check for outstanding interrupts when we re-enable
  * them (or when we unmask an interrupt). This seems to work for the moment,
@@ -984,10 +989,10 @@ __init void lguest_init(void)
 
 	/* interrupt-related operations */
 	pv_irq_ops.init_IRQ = lguest_init_IRQ;
-	pv_irq_ops.save_fl = save_fl;
-	pv_irq_ops.restore_fl = restore_fl;
-	pv_irq_ops.irq_disable = irq_disable;
-	pv_irq_ops.irq_enable = irq_enable;
+	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
+	pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
+	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
+	pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
 	pv_irq_ops.safe_halt = lguest_safe_halt;
 
 	/* init-time operations */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0cd2a165f179..ff6d530ccc77 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -676,10 +676,10 @@ void xen_setup_vcpu_info_placement(void)
 	if (have_vcpu_info_placement) {
 		printk(KERN_INFO "Xen: using vcpu_info placement\n");
 
-		pv_irq_ops.save_fl = xen_save_fl_direct;
-		pv_irq_ops.restore_fl = xen_restore_fl_direct;
-		pv_irq_ops.irq_disable = xen_irq_disable_direct;
-		pv_irq_ops.irq_enable = xen_irq_enable_direct;
+		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
+		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
+		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
+		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
 		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
 	}
 }
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 2e8271431e1a..5a070900ad35 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -50,6 +50,7 @@ static unsigned long xen_save_fl(void)
 	 */
 	return (-flags) & X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
 static void xen_restore_fl(unsigned long flags)
 {
@@ -76,6 +77,7 @@ static void xen_restore_fl(unsigned long flags)
 		xen_force_evtchn_callback();
 	}
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
 static void xen_irq_disable(void)
 {
@@ -86,6 +88,7 @@ static void xen_irq_disable(void)
 	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
 	preempt_enable_no_resched();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
 static void xen_irq_enable(void)
 {
@@ -106,6 +109,7 @@ static void xen_irq_enable(void)
 	if (unlikely(vcpu->evtchn_upcall_pending))
 		xen_force_evtchn_callback();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
 
 static void xen_safe_halt(void)
 {
@@ -124,10 +128,12 @@ static void xen_halt(void)
 
 static const struct pv_irq_ops xen_irq_ops __initdata = {
 	.init_IRQ = __xen_init_IRQ,
-	.save_fl = xen_save_fl,
-	.restore_fl = xen_restore_fl,
-	.irq_disable = xen_irq_disable,
-	.irq_enable = xen_irq_enable,
+
+	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
+	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
+	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
+	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
+
 	.safe_halt = xen_safe_halt,
 	.halt = xen_halt,
 #ifdef CONFIG_X86_64