 arch/ia64/include/asm/xen/privop.h |   4
 arch/ia64/xen/hypercall.S          |   2
 arch/ia64/xen/xen_pv_ops.c         | 665
 3 files changed, 671 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
index 2261dda578ff..e5fbaeeb161a 100644
--- a/arch/ia64/include/asm/xen/privop.h
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -82,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
 extern unsigned long xen_get_cpuid(int index);
 extern unsigned long xen_get_pmd(int index);
 
+#ifndef ASM_SUPPORTED
 extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
 extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
+#endif
 
 /************************************************/
 /* Instructions paravirtualized for performance */
@@ -108,6 +110,7 @@ extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
 #define xen_get_virtual_pend()		\
 	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
 
+#ifndef ASM_SUPPORTED
 /* Although all privileged operations can be left to trap and will
  * be properly handled by Xen, some are frequent enough that we use
  * hyperprivops for performance. */
@@ -125,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
 			       unsigned long val4);
 extern void xen_set_kr(unsigned long index, unsigned long val);
 extern void xen_ptcga(unsigned long addr, unsigned long size);
+#endif /* !ASM_SUPPORTED */
 
 #endif /* !__ASSEMBLY__ */
 
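The interrupt_mask_addr byte and the pending byte just below it (see xen_get_virtual_pend() above) form the protocol that the new xen_intrin_local_irq_restore stub in xen_pv_ops.c, later in this diff, implements in ia64 assembly. A minimal userspace C sketch of that restore logic, with the two Xen shared-page bytes modeled as plain variables and the SSM_I hyperprivop replaced by a placeholder function (editorial illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the two bytes in the Xen shared page: psr_i_masked != 0
 * means virtual PSR.I is masked, pending != 0 means an event arrived
 * while it was masked.  Illustrative only. */
static volatile uint8_t psr_i_masked = 1;
static volatile uint8_t pending = 1;

/* Placeholder for the HYPERPRIVOP_SSM_I hypercall. */
static void hyperprivop_ssm_i(void)
{
	puts("HYPERPRIVOP_SSM_I: deliver the pending event");
}

/* Mirrors the pseudocode comment above xen_intrin_local_irq_restore. */
static void intrin_local_irq_restore(unsigned long psr_i)
{
	if (psr_i) {
		uint8_t was_masked = psr_i_masked;

		psr_i_masked = 0;			/* xen_set_virtual_psr_i(1) */
		__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
		if (was_masked && pending)
			hyperprivop_ssm_i();		/* kick the hypervisor */
	} else {
		psr_i_masked = 1;			/* xen_set_virtual_psr_i(0) */
	}
}

int main(void)
{
	intrin_local_irq_restore(1);	/* expect the SSM_I message */
	return 0;
}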
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
index 45e02bb64a92..e32dae444dd6 100644
--- a/arch/ia64/xen/hypercall.S
+++ b/arch/ia64/xen/hypercall.S
@@ -9,6 +9,7 @@
 #include <asm/intrinsics.h>
 #include <asm/xen/privop.h>
 
+#ifdef __INTEL_COMPILER
 /*
  * Hypercalls without parameter.
  */
@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4)
 	br.ret.sptk.many rp
 	;;
 END(xen_set_rr0_to_rr4)
+#endif
 
 GLOBAL_ENTRY(xen_send_ipi)
 	mov r14=r32
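With ASM_SUPPORTED defined, the xen_pv_ops.c hunks below emit each privileged operation as a tiny asm stub bracketed by _direct_start/_direct_end labels, and xen_patch_bundle (near the end of this diff) copies the stub registered for a given patch type over the call site at boot. A hedged userspace sketch of that table-driven lookup and copy, using dummy byte arrays in place of real instruction bundles and invented PATCH_TYPE_* constants (the kernel's real type values come from its ia64 paravirt headers, not from this sketch):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative patch types; placeholders only. */
enum { PATCH_TYPE_FC = 1, PATCH_TYPE_THASH = 2 };

struct patch_elem {
	const void *start;	/* first byte of the replacement stub */
	const void *end;	/* one past the last byte */
	unsigned long type;
};

/* Dummy "stub bodies"; in the kernel these are the *_direct_start/_end
 * ranges of the asm stubs defined in xen_pv_ops.c. */
static const unsigned char fc_stub[]    = { 0x11, 0x22, 0x33 };
static const unsigned char thash_stub[] = { 0x44, 0x55 };

static const struct patch_elem elems[] = {
	{ fc_stub,    fc_stub    + sizeof(fc_stub),    PATCH_TYPE_FC },
	{ thash_stub, thash_stub + sizeof(thash_stub), PATCH_TYPE_THASH },
};

/* Copy the stub registered for 'type' into [sbundle, ebundle) if it fits;
 * return how many bytes were used, 0 if nothing matched or it didn't fit
 * (the kernel then falls back to the native patching path). */
static size_t patch_bundle(void *sbundle, void *ebundle, unsigned long type)
{
	size_t room = (size_t)((char *)ebundle - (char *)sbundle);
	size_t i;

	for (i = 0; i < sizeof(elems) / sizeof(elems[0]); i++) {
		size_t len = (size_t)((const char *)elems[i].end -
				      (const char *)elems[i].start);

		if (elems[i].type != type)
			continue;
		if (len > room)
			return 0;
		memcpy(sbundle, elems[i].start, len);
		return len;
	}
	return 0;
}

int main(void)
{
	unsigned char site[8] = { 0 };

	printf("patched %zu bytes\n",
	       patch_bundle(site, site + sizeof(site), PATCH_TYPE_THASH));
	return 0;
}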
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index bdf1acbce81c..6c44225e7b84 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -154,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void)
 	xen_setup_vcpu_info_placement();
 }
 
+#ifdef ASM_SUPPORTED
+static unsigned long __init_or_module
+xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+#endif
+static void __init
+xen_patch_branch(unsigned long tag, unsigned long type);
+
 static const struct pv_init_ops xen_init_ops __initconst = {
 	.banner = xen_banner,
 
@@ -164,6 +171,10 @@ static const struct pv_init_ops xen_init_ops __initconst = {
 	.arch_setup_nomca = xen_arch_setup_nomca,
 
 	.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
+#ifdef ASM_SUPPORTED
+	.patch_bundle = xen_patch_bundle,
+#endif
+	.patch_branch = xen_patch_branch,
 };
 
 /***************************************************************************
@@ -214,6 +225,7 @@ static struct pv_patchdata xen_patchdata __initdata = {
  * intrinsics hooks.
  */
 
+#ifndef ASM_SUPPORTED
 static void
 xen_set_itm_with_offset(unsigned long val)
 {
@@ -381,6 +393,410 @@ xen_intrin_local_irq_restore(unsigned long mask)
 	else
 		xen_rsm_i();
 }
+#else
+#define __DEFINE_FUNC(name, code) \
+	extern const char xen_ ## name ## _direct_start[]; \
+	extern const char xen_ ## name ## _direct_end[]; \
+	asm (".align 32\n" \
+	     ".proc xen_" #name "\n" \
+	     "xen_" #name ":\n" \
+	     "xen_" #name "_direct_start:\n" \
+	     code \
+	     "xen_" #name "_direct_end:\n" \
+	     "br.cond.sptk.many b6\n" \
+	     ".endp xen_" #name "\n")
+
+#define DEFINE_VOID_FUNC0(name, code) \
+	extern void \
+	xen_ ## name (void); \
+	__DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC1(name, code) \
+	extern void \
+	xen_ ## name (unsigned long arg); \
+	__DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC2(name, code) \
+	extern void \
+	xen_ ## name (unsigned long arg0, \
+		      unsigned long arg1); \
+	__DEFINE_FUNC(name, code)
+
+#define DEFINE_FUNC0(name, code) \
+	extern unsigned long \
+	xen_ ## name (void); \
+	__DEFINE_FUNC(name, code)
+
+#define DEFINE_FUNC1(name, type, code) \
+	extern unsigned long \
+	xen_ ## name (type arg); \
+	__DEFINE_FUNC(name, code)
+
+#define XEN_PSR_I_ADDR_ADDR	(XSI_BASE + XSI_PSR_I_ADDR_OFS)
+
+/*
+ * static void xen_set_itm_with_offset(unsigned long val)
+ *	xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
+ */
+/* 2 bundles */
+DEFINE_VOID_FUNC1(set_itm_with_offset,
+		  "mov r2 = " __stringify(XSI_BASE) " + "
+		  __stringify(XSI_ITC_OFFSET_OFS) "\n"
+		  ";;\n"
+		  "ld8 r3 = [r2]\n"
+		  ";;\n"
+		  "sub r8 = r8, r3\n"
+		  "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
+
+/*
+ * static unsigned long xen_get_itm_with_offset(void)
+ *	return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
+ */
+/* 2 bundles */
+DEFINE_FUNC0(get_itm_with_offset,
+	     "mov r2 = " __stringify(XSI_BASE) " + "
+	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
+	     ";;\n"
+	     "ld8 r3 = [r2]\n"
+	     "mov r8 = cr.itm\n"
+	     ";;\n"
+	     "add r8 = r8, r2\n");
+
+/*
+ * static void xen_set_itc(unsigned long val)
+ *	unsigned long mitc;
+ *
+ *	WARN_ON(!irqs_disabled());
+ *	mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
+ *	XEN_MAPPEDREGS->itc_offset = val - mitc;
+ *	XEN_MAPPEDREGS->itc_last = val;
+ */
+/* 2 bundles */
+DEFINE_VOID_FUNC1(set_itc,
+		  "mov r2 = " __stringify(XSI_BASE) " + "
+		  __stringify(XSI_ITC_LAST_OFS) "\n"
+		  "mov r3 = ar.itc\n"
+		  ";;\n"
+		  "sub r3 = r8, r3\n"
+		  "st8 [r2] = r8, "
+		  __stringify(XSI_ITC_LAST_OFS) " - "
+		  __stringify(XSI_ITC_OFFSET_OFS) "\n"
+		  ";;\n"
+		  "st8 [r2] = r3\n");
+
+/*
+ * static unsigned long xen_get_itc(void)
+ *	unsigned long res;
+ *	unsigned long itc_offset;
+ *	unsigned long itc_last;
+ *	unsigned long ret_itc_last;
+ *
+ *	itc_offset = XEN_MAPPEDREGS->itc_offset;
+ *	do {
+ *		itc_last = XEN_MAPPEDREGS->itc_last;
+ *		res = ia64_native_getreg(_IA64_REG_AR_ITC);
+ *		res += itc_offset;
+ *		if (itc_last >= res)
+ *			res = itc_last + 1;
+ *		ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
+ *				       itc_last, res);
+ *	} while (unlikely(ret_itc_last != itc_last));
+ *	return res;
+ */
+/* 5 bundles */
+DEFINE_FUNC0(get_itc,
+	     "mov r2 = " __stringify(XSI_BASE) " + "
+	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
+	     ";;\n"
+	     "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
+	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
+					/* r9 = itc_offset */
+					/* r2 = XSI_ITC_OFFSET */
+	     "888:\n"
+	     "mov r8 = ar.itc\n"	/* res = ar.itc */
+	     ";;\n"
+	     "ld8 r3 = [r2]\n"		/* r3 = itc_last */
+	     "add r8 = r8, r9\n"	/* res = ar.itc + itc_offset */
+	     ";;\n"
+	     "cmp.gtu p6, p0 = r3, r8\n"
+	     ";;\n"
+	     "(p6) add r8 = 1, r3\n"	/* if (itc_last > res) itc_last + 1 */
+	     ";;\n"
+	     "mov ar.ccv = r8\n"
+	     ";;\n"
+	     "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
+	     ";;\n"
+	     "cmp.ne p6, p0 = r10, r3\n"
+	     "(p6) hint @pause\n"
+	     "(p6) br.cond.spnt 888b\n");
+
+DEFINE_VOID_FUNC1(fc,
+		  "break " __stringify(HYPERPRIVOP_FC) "\n");
+
+/*
+ * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
+ * masked_addr = *psr_i_addr_addr
+ * pending_intr_addr = masked_addr - 1
+ * if (val & IA64_PSR_I) {
+ *	masked = *masked_addr
+ *	*masked_addr = 0:xen_set_virtual_psr_i(1)
+ *	compiler barrier
+ *	if (masked) {
+ *		uint8_t pending = *pending_intr_addr;
+ *		if (pending)
+ *			XEN_HYPER_SSM_I
+ *	}
+ * } else {
+ *	*masked_addr = 1:xen_set_virtual_psr_i(0)
+ * }
+ */
+/* 6 bundles */
+DEFINE_VOID_FUNC1(intrin_local_irq_restore,
+		  /* r8 = input value: 0 or IA64_PSR_I
+		   * p6 =  (flags & IA64_PSR_I)
+		   *    = if clause
+		   * p7 = !(flags & IA64_PSR_I)
+		   *    = else clause
+		   */
+		  "cmp.ne p6, p7 = r8, r0\n"
+		  "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+		  ";;\n"
+		  /* r9 = XEN_PSR_I_ADDR */
+		  "ld8 r9 = [r9]\n"
+		  ";;\n"
+
+		  /* r10 = masked previous value */
+		  "(p6) ld1.acq r10 = [r9]\n"
+		  ";;\n"
+
+		  /* p8 = !masked interrupt masked previously? */
+		  "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
+
+		  /* p7 = else clause */
+		  "(p7) mov r11 = 1\n"
+		  ";;\n"
+		  /* masked = 1 */
+		  "(p7) st1.rel [r9] = r11\n"
+
+		  /* p6 = if clause */
+		  /* masked = 0
+		   * r9 = masked_addr - 1
+		   *    = pending_intr_addr
+		   */
+		  "(p8) st1.rel [r9] = r0, -1\n"
+		  ";;\n"
+		  /* r8 = pending_intr */
+		  "(p8) ld1.acq r11 = [r9]\n"
+		  ";;\n"
+		  /* p9 = interrupt pending? */
+		  "(p8) cmp.ne.unc p9, p10 = r11, r0\n"
+		  ";;\n"
+		  "(p10) mf\n"
+		  /* issue hypercall to trigger interrupt */
+		  "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n");
+
+DEFINE_VOID_FUNC2(ptcga,
+		  "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
+DEFINE_VOID_FUNC2(set_rr,
+		  "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
+
+/*
+ * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
+ * tmp = *tmp
+ * tmp = *tmp;
+ * psr_i = tmp? 0: IA64_PSR_I;
+ */
+/* 4 bundles */
+DEFINE_FUNC0(get_psr_i,
+	     "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+	     ";;\n"
+	     "ld8 r9 = [r9]\n"			/* r9 = XEN_PSR_I_ADDR */
+	     "mov r8 = 0\n"			/* psr_i = 0 */
+	     ";;\n"
+	     "ld1.acq r9 = [r9]\n"		/* r9 = XEN_PSR_I */
+	     ";;\n"
+	     "cmp.eq.unc p6, p0 = r9, r0\n"	/* p6 = (XEN_PSR_I != 0) */
+	     ";;\n"
+	     "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");
+
+DEFINE_FUNC1(thash, unsigned long,
+	     "break " __stringify(HYPERPRIVOP_THASH) "\n");
+DEFINE_FUNC1(get_cpuid, int,
+	     "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
+DEFINE_FUNC1(get_pmd, int,
+	     "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
+DEFINE_FUNC1(get_rr, unsigned long,
+	     "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
+
+/*
+ * void xen_privop_ssm_i(void)
+ *
+ * int masked = !xen_get_virtual_psr_i();
+ *	// masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
+ * xen_set_virtual_psr_i(1)
+ *	// *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
+ * // compiler barrier
+ * if (masked) {
+ *	uint8_t* pend_int_addr =
+ *		(uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
+ *	uint8_t pending = *pend_int_addr;
+ *	if (pending)
+ *		XEN_HYPER_SSM_I
+ * }
+ */
+/* 4 bundles */
+DEFINE_VOID_FUNC0(ssm_i,
+		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+		  ";;\n"
+		  "ld8 r8 = [r8]\n"		/* r8 = XEN_PSR_I_ADDR */
+		  ";;\n"
+		  "ld1.acq r9 = [r8]\n"		/* r9 = XEN_PSR_I */
+		  ";;\n"
+		  "st1.rel [r8] = r0, -1\n"	/* psr_i = 0. enable interrupt
+						 * r8 = XEN_PSR_I_ADDR - 1
+						 *    = pend_int_addr
+						 */
+		  "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
+						 * previously interrupt
+						 * masked?
+						 */
+		  ";;\n"
+		  "(p6) ld1.acq r8 = [r8]\n"	/* r8 = xen_pend_int */
+		  ";;\n"
+		  "(p6) cmp.eq.unc p6, p7 = r8, r0\n"	/*interrupt pending?*/
+		  ";;\n"
+		  /* issue hypercall to get interrupt */
+		  "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
+		  ";;\n");
+
+/*
+ * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
+ *		   = XEN_PSR_I_ADDR_ADDR;
+ * psr_i_addr = *psr_i_addr_addr;
+ * *psr_i_addr = 1;
+ */
+/* 2 bundles */
+DEFINE_VOID_FUNC0(rsm_i,
+		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+						/* r8 = XEN_PSR_I_ADDR */
+		  "mov r9 = 1\n"
+		  ";;\n"
+		  "ld8 r8 = [r8]\n"		/* r8 = XEN_PSR_I */
+		  ";;\n"
+		  "st1.rel [r8] = r9\n");	/* XEN_PSR_I = 1 */
+
+extern void
+xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
+		   unsigned long val2, unsigned long val3,
+		   unsigned long val4);
+__DEFINE_FUNC(set_rr0_to_rr4,
+	      "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
+
+
+extern unsigned long xen_getreg(int regnum);
+#define __DEFINE_GET_REG(id, privop) \
+	"mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
+	";;\n" \
+	"cmp.eq p6, p0 = r2, r8\n" \
+	";;\n" \
+	"(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \
+	"(p6) br.cond.sptk.many b6\n" \
+	";;\n"
+
+__DEFINE_FUNC(getreg,
+	      __DEFINE_GET_REG(PSR, PSR)
+#ifdef CONFIG_IA32_SUPPORT
+	      __DEFINE_GET_REG(AR_EFLAG, EFLAG)
+#endif
+
+	      /* get_itc */
+	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
+	      ";;\n"
+	      "cmp.eq p6, p0 = r2, r8\n"
+	      ";;\n"
+	      "(p6) br.cond.spnt xen_get_itc\n"
+	      ";;\n"
+
+	      /* get itm */
+	      "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
+	      ";;\n"
+	      "cmp.eq p6, p0 = r2, r8\n"
+	      ";;\n"
+	      "(p6) br.cond.spnt xen_get_itm_with_offset\n"
+	      ";;\n"
+
+	      __DEFINE_GET_REG(CR_IVR, IVR)
+	      __DEFINE_GET_REG(CR_TPR, TPR)
+
+	      /* fall back */
+	      "movl r2 = ia64_native_getreg_func\n"
+	      ";;\n"
+	      "mov b7 = r2\n"
+	      ";;\n"
+	      "br.cond.sptk.many b7\n");
+
+extern void xen_setreg(int regnum, unsigned long val);
+#define __DEFINE_SET_REG(id, privop) \
+	"mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
+	";;\n" \
+	"cmp.eq p6, p0 = r2, r9\n" \
+	";;\n" \
+	"(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \
+	"(p6) br.cond.sptk.many b6\n" \
+	";;\n"
+
+__DEFINE_FUNC(setreg,
+	      /* kr0 .. kr 7*/
+	      /*
+	       * if (_IA64_REG_AR_KR0 <= regnum &&
+	       *     regnum <= _IA64_REG_AR_KR7) {
+	       *	register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
+	       *	register __val asm ("r9") = val
+	       *	"break HYPERPRIVOP_SET_KR"
+	       * }
+	       */
+	      "mov r17 = r9\n"
+	      "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
+	      ";;\n"
+	      "cmp.ge p6, p0 = r9, r2\n"
+	      "sub r17 = r17, r2\n"
+	      ";;\n"
+	      "(p6) cmp.ge.unc p7, p0 = "
+	      __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
+	      ", r17\n"
+	      ";;\n"
+	      "(p7) mov r9 = r8\n"
+	      ";;\n"
+	      "(p7) mov r8 = r17\n"
+	      "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"
+
+	      /* set itm */
+	      "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
+	      ";;\n"
+	      "cmp.eq p6, p0 = r2, r8\n"
+	      ";;\n"
+	      "(p6) br.cond.spnt xen_set_itm_with_offset\n"
+
+	      /* set itc */
+	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
+	      ";;\n"
+	      "cmp.eq p6, p0 = r2, r8\n"
+	      ";;\n"
+	      "(p6) br.cond.spnt xen_set_itc\n"
+
+#ifdef CONFIG_IA32_SUPPORT
+	      __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG)
+#endif
+	      __DEFINE_SET_REG(CR_TPR, SET_TPR)
+	      __DEFINE_SET_REG(CR_EOI, EOI)
+
+	      /* fall back */
+	      "movl r2 = ia64_native_setreg_func\n"
+	      ";;\n"
+	      "mov b7 = r2\n"
+	      ";;\n"
+	      "br.cond.sptk.many b7\n");
+#endif
 
 static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.fc = xen_fc,
@@ -486,3 +902,252 @@ xen_setup_pv_ops(void)
 
 	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
 }
+
+#ifdef ASM_SUPPORTED
+/***************************************************************************
+ * binary patching
+ * pv_init_ops.patch_bundle
+ */
+
+#define DEFINE_FUNC_GETREG(name, privop) \
+	DEFINE_FUNC0(get_ ## name, \
+		     "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")
+
+DEFINE_FUNC_GETREG(psr, PSR);
+DEFINE_FUNC_GETREG(eflag, EFLAG);
+DEFINE_FUNC_GETREG(ivr, IVR);
+DEFINE_FUNC_GETREG(tpr, TPR);
+
+#define DEFINE_FUNC_SET_KR(n) \
+	DEFINE_VOID_FUNC0(set_kr ## n, \
+			  ";;\n" \
+			  "mov r9 = r8\n" \
+			  "mov r8 = " #n "\n" \
+			  "break " __stringify(HYPERPRIVOP_SET_KR) "\n")
+
+DEFINE_FUNC_SET_KR(0);
+DEFINE_FUNC_SET_KR(1);
+DEFINE_FUNC_SET_KR(2);
+DEFINE_FUNC_SET_KR(3);
+DEFINE_FUNC_SET_KR(4);
+DEFINE_FUNC_SET_KR(5);
+DEFINE_FUNC_SET_KR(6);
+DEFINE_FUNC_SET_KR(7);
+
+#define __DEFINE_FUNC_SETREG(name, privop) \
+	DEFINE_VOID_FUNC0(name, \
+			  "break "__stringify(HYPERPRIVOP_ ## privop) "\n")
+
+#define DEFINE_FUNC_SETREG(name, privop) \
+	__DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)
+
+DEFINE_FUNC_SETREG(eflag, EFLAG);
+DEFINE_FUNC_SETREG(tpr, TPR);
+__DEFINE_FUNC_SETREG(eoi, EOI);
+
+extern const char xen_check_events[];
+extern const char __xen_intrin_local_irq_restore_direct_start[];
+extern const char __xen_intrin_local_irq_restore_direct_end[];
+extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;
+
+asm (
+	".align 32\n"
+	".proc xen_check_events\n"
+	"xen_check_events:\n"
+	/* masked = 0
+	 * r9 = masked_addr - 1
+	 *    = pending_intr_addr
+	 */
+	"st1.rel [r9] = r0, -1\n"
+	";;\n"
+	/* r8 = pending_intr */
+	"ld1.acq r11 = [r9]\n"
+	";;\n"
+	/* p9 = interrupt pending? */
+	"cmp.ne p9, p10 = r11, r0\n"
+	";;\n"
+	"(p10) mf\n"
+	/* issue hypercall to trigger interrupt */
+	"(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
+	"br.cond.sptk.many b6\n"
+	".endp xen_check_events\n"
+	"\n"
+	".align 32\n"
+	".proc __xen_intrin_local_irq_restore_direct\n"
+	"__xen_intrin_local_irq_restore_direct:\n"
+	"__xen_intrin_local_irq_restore_direct_start:\n"
+	"1:\n"
+	"{\n"
+	"cmp.ne p6, p7 = r8, r0\n"
+	"mov r17 = ip\n" /* get ip to calc return address */
+	"mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+	";;\n"
+	"}\n"
+	"{\n"
+	/* r9 = XEN_PSR_I_ADDR */
+	"ld8 r9 = [r9]\n"
+	";;\n"
+	/* r10 = masked previous value */
+	"(p6) ld1.acq r10 = [r9]\n"
+	"adds r17 = 1f - 1b, r17\n" /* calculate return address */
+	";;\n"
+	"}\n"
+	"{\n"
+	/* p8 = !masked interrupt masked previously? */
+	"(p6) cmp.ne.unc p8, p0 = r10, r0\n"
+	"\n"
+	/* p7 = else clause */
+	"(p7) mov r11 = 1\n"
+	";;\n"
+	"(p8) mov b6 = r17\n" /* set return address */
+	"}\n"
+	"{\n"
+	/* masked = 1 */
+	"(p7) st1.rel [r9] = r11\n"
+	"\n"
+	"[99:]\n"
+	"(p8) brl.cond.dptk.few xen_check_events\n"
+	"}\n"
+	/* pv calling stub is 5 bundles. fill nop to adjust return address */
+	"{\n"
+	"nop 0\n"
+	"nop 0\n"
+	"nop 0\n"
+	"}\n"
+	"1:\n"
+	"__xen_intrin_local_irq_restore_direct_end:\n"
+	".endp __xen_intrin_local_irq_restore_direct\n"
+	"\n"
+	".align 8\n"
+	"__xen_intrin_local_irq_restore_direct_reloc:\n"
+	"data8 99b\n"
+);
+
+static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
+__initdata_or_module =
+{
+#define XEN_PATCH_BUNDLE_ELEM(name, type) \
+	{ \
+		(void*)xen_ ## name ## _direct_start, \
+		(void*)xen_ ## name ## _direct_end, \
+		PARAVIRT_PATCH_TYPE_ ## type, \
+	}
+
+	XEN_PATCH_BUNDLE_ELEM(fc, FC),
+	XEN_PATCH_BUNDLE_ELEM(thash, THASH),
+	XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
+	XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
+	XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
+	XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
+	XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
+	XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
+	XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
+	XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
+	XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
+	{
+		(void*)__xen_intrin_local_irq_restore_direct_start,
+		(void*)__xen_intrin_local_irq_restore_direct_end,
+		PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
+	},
+
+#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
+	{ \
+		xen_get_ ## name ## _direct_start, \
+		xen_get_ ## name ## _direct_end, \
+		PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
+	}
+
+	XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
+	XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),
+
+	XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
+	XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),
+
+	XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
+	XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),
+
+
+#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
+	{ \
+		xen_ ## name ## _direct_start, \
+		xen_ ## name ## _direct_end, \
+		PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
+	}
+
+#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
+	__XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)
+
+	XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
+	XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
+	XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
+	XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
+	XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
+	XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
+	XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
+	XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),
+
+	XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
+	XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
+	__XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),
+
+	XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
+	XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
+};
+
+static unsigned long __init_or_module
+xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
+{
+	const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
+		sizeof(xen_patch_bundle_elems[0]);
+	unsigned long used;
+	const struct paravirt_patch_bundle_elem *found;
+
+	used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
+					     xen_patch_bundle_elems, nelems,
+					     &found);
+
+	if (found == NULL)
+		/* fallback */
+		return ia64_native_patch_bundle(sbundle, ebundle, type);
+	if (used == 0)
+		return used;
+
+	/* relocation */
+	switch (type) {
+	case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
+		unsigned long reloc =
+			__xen_intrin_local_irq_restore_direct_reloc;
+		unsigned long reloc_offset = reloc - (unsigned long)
+			__xen_intrin_local_irq_restore_direct_start;
+		unsigned long tag = (unsigned long)sbundle + reloc_offset;
+		paravirt_patch_reloc_brl(tag, xen_check_events);
+		break;
+	}
+	default:
+		/* nothing */
+		break;
+	}
+	return used;
+}
+#endif /* ASM_SUPPORTED */
+
+const struct paravirt_patch_branch_target xen_branch_target[]
+__initconst = {
+#define PARAVIRT_BR_TARGET(name, type) \
+	{ \
+		&xen_ ## name, \
+		PARAVIRT_PATCH_TYPE_BR_ ## type, \
+	}
+	PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
+	PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
+	PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
+	PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
+};
+
+static void __init
+xen_patch_branch(unsigned long tag, unsigned long type)
+{
+	const unsigned long nelem =
+		sizeof(xen_branch_target) / sizeof(xen_branch_target[0]);
+	__paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);
+}
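The xen_get_itc stub earlier in this diff implements, in five ia64 bundles, the compare-and-swap loop spelled out in its pseudocode comment: the offset-corrected time counter must never appear to go backwards, so the last returned value is advanced with a cmpxchg on itc_last. A small self-contained C11 rendering of that loop, with itc_offset, itc_last and the hardware counter read replaced by local stand-ins (host-side illustration only, not kernel code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for XEN_MAPPEDREGS->itc_offset / itc_last and ar.itc. */
static uint64_t itc_offset = 1000;
static _Atomic uint64_t itc_last = 0;

static uint64_t read_hw_itc(void)	/* placeholder for reading ar.itc */
{
	static uint64_t fake = 42;
	return fake += 7;
}

/* Same loop as the pseudocode above xen_get_itc: never return a value
 * at or below a previously returned one, even if the offset-corrected
 * counter appears to step backwards. */
static uint64_t get_itc(void)
{
	uint64_t res, last;

	do {
		last = atomic_load(&itc_last);
		res = read_hw_itc() + itc_offset;
		if (last >= res)
			res = last + 1;
	} while (!atomic_compare_exchange_weak(&itc_last, &last, res));

	return res;
}

int main(void)
{
	printf("%llu\n%llu\n",
	       (unsigned long long)get_itc(),
	       (unsigned long long)get_itc());
	return 0;
}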
