diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-08-25 20:31:59 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-08-25 20:31:59 -0400 |
| commit | 267560874cb0189f28e7ae6dfbc8e98b8848be98 (patch) | |
| tree | 649702ebe9faf5679d6e81c08dc07c4fcb04a2dd | |
| parent | 4ae4614712e0a354c9e6c286c0db1612246766ce (diff) | |
| parent | c96aae1f7f393387d160211f60398d58463a7e65 (diff) | |
Merge tag 'stable/for-linus-3.6-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull three xen bug-fixes from Konrad Rzeszutek Wilk:
- Revert the kexec fix, which caused a race on non-kexec shutdowns.
- Reuse existing P2M leaves, instead of requiring the allocation of a large
  area of boot-time virtual address space.
- Fix an off-by-one error when adding PFNs for balloon pages.
* tag 'stable/for-linus-3.6-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen/setup: Fix one-off error when adding for-balloon PFNs to the P2M.
xen/p2m: Reuse existing P2M leafs if they are filled with 1:1 PFNs or INVALID.
Revert "xen PVonHVM: move shared_info to MMIO before kexec"
| -rw-r--r-- | arch/x86/xen/enlighten.c | 118 | ||||
| -rw-r--r-- | arch/x86/xen/p2m.c | 95 | ||||
| -rw-r--r-- | arch/x86/xen/setup.c | 9 | ||||
| -rw-r--r-- | arch/x86/xen/suspend.c | 2 | ||||
| -rw-r--r-- | arch/x86/xen/xen-ops.h | 2 | ||||
| -rw-r--r-- | drivers/xen/platform-pci.c | 15 | ||||
| -rw-r--r-- | include/xen/events.h | 2 |
7 files changed, 113 insertions, 130 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bf4bda6d3e9a..9642d4a38602 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -31,7 +31,6 @@ | |||
| 31 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
| 32 | #include <linux/gfp.h> | 32 | #include <linux/gfp.h> |
| 33 | #include <linux/memblock.h> | 33 | #include <linux/memblock.h> |
| 34 | #include <linux/syscore_ops.h> | ||
| 35 | 34 | ||
| 36 | #include <xen/xen.h> | 35 | #include <xen/xen.h> |
| 37 | #include <xen/interface/xen.h> | 36 | #include <xen/interface/xen.h> |
| @@ -1470,130 +1469,38 @@ asmlinkage void __init xen_start_kernel(void) | |||
| 1470 | #endif | 1469 | #endif |
| 1471 | } | 1470 | } |
| 1472 | 1471 | ||
| 1473 | #ifdef CONFIG_XEN_PVHVM | 1472 | void __ref xen_hvm_init_shared_info(void) |
| 1474 | /* | ||
| 1475 | * The pfn containing the shared_info is located somewhere in RAM. This | ||
| 1476 | * will cause trouble if the current kernel is doing a kexec boot into a | ||
| 1477 | * new kernel. The new kernel (and its startup code) can not know where | ||
| 1478 | * the pfn is, so it can not reserve the page. The hypervisor will | ||
| 1479 | * continue to update the pfn, and as a result memory corruption occours | ||
| 1480 | * in the new kernel. | ||
| 1481 | * | ||
| 1482 | * One way to work around this issue is to allocate a page in the | ||
| 1483 | * xen-platform pci device's BAR memory range. But pci init is done very | ||
| 1484 | * late and the shared_info page is already in use very early to read | ||
| 1485 | * the pvclock. So moving the pfn from RAM to MMIO is racy because some | ||
| 1486 | * code paths on other vcpus could access the pfn during the small | ||
| 1487 | * window when the old pfn is moved to the new pfn. There is even a | ||
| 1488 | * small window were the old pfn is not backed by a mfn, and during that | ||
| 1489 | * time all reads return -1. | ||
| 1490 | * | ||
| 1491 | * Because it is not known upfront where the MMIO region is located it | ||
| 1492 | * can not be used right from the start in xen_hvm_init_shared_info. | ||
| 1493 | * | ||
| 1494 | * To minimise trouble the move of the pfn is done shortly before kexec. | ||
| 1495 | * This does not eliminate the race because all vcpus are still online | ||
| 1496 | * when the syscore_ops will be called. But hopefully there is no work | ||
| 1497 | * pending at this point in time. Also the syscore_op is run last which | ||
| 1498 | * reduces the risk further. | ||
| 1499 | */ | ||
| 1500 | |||
| 1501 | static struct shared_info *xen_hvm_shared_info; | ||
| 1502 | |||
| 1503 | static void xen_hvm_connect_shared_info(unsigned long pfn) | ||
| 1504 | { | 1473 | { |
| 1474 | int cpu; | ||
| 1505 | struct xen_add_to_physmap xatp; | 1475 | struct xen_add_to_physmap xatp; |
| 1476 | static struct shared_info *shared_info_page = 0; | ||
| 1506 | 1477 | ||
| 1478 | if (!shared_info_page) | ||
| 1479 | shared_info_page = (struct shared_info *) | ||
| 1480 | extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
| 1507 | xatp.domid = DOMID_SELF; | 1481 | xatp.domid = DOMID_SELF; |
| 1508 | xatp.idx = 0; | 1482 | xatp.idx = 0; |
| 1509 | xatp.space = XENMAPSPACE_shared_info; | 1483 | xatp.space = XENMAPSPACE_shared_info; |
| 1510 | xatp.gpfn = pfn; | 1484 | xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; |
| 1511 | if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) | 1485 | if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) |
| 1512 | BUG(); | 1486 | BUG(); |
| 1513 | 1487 | ||
| 1514 | } | 1488 | HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; |
| 1515 | static void xen_hvm_set_shared_info(struct shared_info *sip) | ||
| 1516 | { | ||
| 1517 | int cpu; | ||
| 1518 | |||
| 1519 | HYPERVISOR_shared_info = sip; | ||
| 1520 | 1489 | ||
| 1521 | /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info | 1490 | /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info |
| 1522 | * page, we use it in the event channel upcall and in some pvclock | 1491 | * page, we use it in the event channel upcall and in some pvclock |
| 1523 | * related functions. We don't need the vcpu_info placement | 1492 | * related functions. We don't need the vcpu_info placement |
| 1524 | * optimizations because we don't use any pv_mmu or pv_irq op on | 1493 | * optimizations because we don't use any pv_mmu or pv_irq op on |
| 1525 | * HVM. | 1494 | * HVM. |
| 1526 | * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is | 1495 | * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is |
| 1527 | * online but xen_hvm_set_shared_info is run at resume time too and | 1496 | * online but xen_hvm_init_shared_info is run at resume time too and |
| 1528 | * in that case multiple vcpus might be online. */ | 1497 | * in that case multiple vcpus might be online. */ |
| 1529 | for_each_online_cpu(cpu) { | 1498 | for_each_online_cpu(cpu) { |
| 1530 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; | 1499 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; |
| 1531 | } | 1500 | } |
| 1532 | } | 1501 | } |
| 1533 | 1502 | ||
| 1534 | /* Reconnect the shared_info pfn to a mfn */ | 1503 | #ifdef CONFIG_XEN_PVHVM |
| 1535 | void xen_hvm_resume_shared_info(void) | ||
| 1536 | { | ||
| 1537 | xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT); | ||
| 1538 | } | ||
| 1539 | |||
| 1540 | #ifdef CONFIG_KEXEC | ||
| 1541 | static struct shared_info *xen_hvm_shared_info_kexec; | ||
| 1542 | static unsigned long xen_hvm_shared_info_pfn_kexec; | ||
| 1543 | |||
| 1544 | /* Remember a pfn in MMIO space for kexec reboot */ | ||
| 1545 | void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn) | ||
| 1546 | { | ||
| 1547 | xen_hvm_shared_info_kexec = sip; | ||
| 1548 | xen_hvm_shared_info_pfn_kexec = pfn; | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | static void xen_hvm_syscore_shutdown(void) | ||
| 1552 | { | ||
| 1553 | struct xen_memory_reservation reservation = { | ||
| 1554 | .domid = DOMID_SELF, | ||
| 1555 | .nr_extents = 1, | ||
| 1556 | }; | ||
| 1557 | unsigned long prev_pfn; | ||
| 1558 | int rc; | ||
| 1559 | |||
| 1560 | if (!xen_hvm_shared_info_kexec) | ||
| 1561 | return; | ||
| 1562 | |||
| 1563 | prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT; | ||
| 1564 | set_xen_guest_handle(reservation.extent_start, &prev_pfn); | ||
| 1565 | |||
| 1566 | /* Move pfn to MMIO, disconnects previous pfn from mfn */ | ||
| 1567 | xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec); | ||
| 1568 | |||
| 1569 | /* Update pointers, following hypercall is also a memory barrier */ | ||
| 1570 | xen_hvm_set_shared_info(xen_hvm_shared_info_kexec); | ||
| 1571 | |||
| 1572 | /* Allocate new mfn for previous pfn */ | ||
| 1573 | do { | ||
| 1574 | rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); | ||
| 1575 | if (rc == 0) | ||
| 1576 | msleep(123); | ||
| 1577 | } while (rc == 0); | ||
| 1578 | |||
| 1579 | /* Make sure the previous pfn is really connected to a (new) mfn */ | ||
| 1580 | BUG_ON(rc != 1); | ||
| 1581 | } | ||
| 1582 | |||
| 1583 | static struct syscore_ops xen_hvm_syscore_ops = { | ||
| 1584 | .shutdown = xen_hvm_syscore_shutdown, | ||
| 1585 | }; | ||
| 1586 | #endif | ||
| 1587 | |||
| 1588 | /* Use a pfn in RAM, may move to MMIO before kexec. */ | ||
| 1589 | static void __init xen_hvm_init_shared_info(void) | ||
| 1590 | { | ||
| 1591 | /* Remember pointer for resume */ | ||
| 1592 | xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
| 1593 | xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT); | ||
| 1594 | xen_hvm_set_shared_info(xen_hvm_shared_info); | ||
| 1595 | } | ||
| 1596 | |||
| 1597 | static void __init init_hvm_pv_info(void) | 1504 | static void __init init_hvm_pv_info(void) |
| 1598 | { | 1505 | { |
| 1599 | int major, minor; | 1506 | int major, minor; |
| @@ -1644,9 +1551,6 @@ static void __init xen_hvm_guest_init(void) | |||
| 1644 | init_hvm_pv_info(); | 1551 | init_hvm_pv_info(); |
| 1645 | 1552 | ||
| 1646 | xen_hvm_init_shared_info(); | 1553 | xen_hvm_init_shared_info(); |
| 1647 | #ifdef CONFIG_KEXEC | ||
| 1648 | register_syscore_ops(&xen_hvm_syscore_ops); | ||
| 1649 | #endif | ||
| 1650 | 1554 | ||
| 1651 | if (xen_feature(XENFEAT_hvm_callback_vector)) | 1555 | if (xen_feature(XENFEAT_hvm_callback_vector)) |
| 1652 | xen_have_vector_callback = 1; | 1556 | xen_have_vector_callback = 1; |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index b2e91d40a4cb..d4b255463253 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
| @@ -196,9 +196,11 @@ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); | |||
| 196 | 196 | ||
| 197 | /* When we populate back during bootup, the amount of pages can vary. The | 197 | /* When we populate back during bootup, the amount of pages can vary. The |
| 198 | * max we have is seen is 395979, but that does not mean it can't be more. | 198 | * max we have is seen is 395979, but that does not mean it can't be more. |
| 199 | * But some machines can have 3GB I/O holes even. So lets reserve enough | 199 | * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle |
| 200 | * for 4GB of I/O and E820 holes. */ | 200 | * it can re-use Xen provided mfn_list array, so we only need to allocate at |
| 201 | RESERVE_BRK(p2m_populated, PMD_SIZE * 4); | 201 | * most three P2M top nodes. */ |
| 202 | RESERVE_BRK(p2m_populated, PAGE_SIZE * 3); | ||
| 203 | |||
| 202 | static inline unsigned p2m_top_index(unsigned long pfn) | 204 | static inline unsigned p2m_top_index(unsigned long pfn) |
| 203 | { | 205 | { |
| 204 | BUG_ON(pfn >= MAX_P2M_PFN); | 206 | BUG_ON(pfn >= MAX_P2M_PFN); |
| @@ -575,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn) | |||
| 575 | } | 577 | } |
| 576 | return true; | 578 | return true; |
| 577 | } | 579 | } |
| 580 | |||
| 581 | /* | ||
| 582 | * Skim over the P2M tree looking at pages that are either filled with | ||
| 583 | * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and | ||
| 584 | * replace the P2M leaf with a p2m_missing or p2m_identity. | ||
| 585 | * Stick the old page in the new P2M tree location. | ||
| 586 | */ | ||
| 587 | bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn) | ||
| 588 | { | ||
| 589 | unsigned topidx; | ||
| 590 | unsigned mididx; | ||
| 591 | unsigned ident_pfns; | ||
| 592 | unsigned inv_pfns; | ||
| 593 | unsigned long *p2m; | ||
| 594 | unsigned long *mid_mfn_p; | ||
| 595 | unsigned idx; | ||
| 596 | unsigned long pfn; | ||
| 597 | |||
| 598 | /* We only look when this entails a P2M middle layer */ | ||
| 599 | if (p2m_index(set_pfn)) | ||
| 600 | return false; | ||
| 601 | |||
| 602 | for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { | ||
| 603 | topidx = p2m_top_index(pfn); | ||
| 604 | |||
| 605 | if (!p2m_top[topidx]) | ||
| 606 | continue; | ||
| 607 | |||
| 608 | if (p2m_top[topidx] == p2m_mid_missing) | ||
| 609 | continue; | ||
| 610 | |||
| 611 | mididx = p2m_mid_index(pfn); | ||
| 612 | p2m = p2m_top[topidx][mididx]; | ||
| 613 | if (!p2m) | ||
| 614 | continue; | ||
| 615 | |||
| 616 | if ((p2m == p2m_missing) || (p2m == p2m_identity)) | ||
| 617 | continue; | ||
| 618 | |||
| 619 | if ((unsigned long)p2m == INVALID_P2M_ENTRY) | ||
| 620 | continue; | ||
| 621 | |||
| 622 | ident_pfns = 0; | ||
| 623 | inv_pfns = 0; | ||
| 624 | for (idx = 0; idx < P2M_PER_PAGE; idx++) { | ||
| 625 | /* IDENTITY_PFNs are 1:1 */ | ||
| 626 | if (p2m[idx] == IDENTITY_FRAME(pfn + idx)) | ||
| 627 | ident_pfns++; | ||
| 628 | else if (p2m[idx] == INVALID_P2M_ENTRY) | ||
| 629 | inv_pfns++; | ||
| 630 | else | ||
| 631 | break; | ||
| 632 | } | ||
| 633 | if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE)) | ||
| 634 | goto found; | ||
| 635 | } | ||
| 636 | return false; | ||
| 637 | found: | ||
| 638 | /* Found one, replace old with p2m_identity or p2m_missing */ | ||
| 639 | p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing); | ||
| 640 | /* And the other for save/restore.. */ | ||
| 641 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
| 642 | /* NOTE: Even if it is a p2m_identity it should still be point to | ||
| 643 | * a page filled with INVALID_P2M_ENTRY entries. */ | ||
| 644 | mid_mfn_p[mididx] = virt_to_mfn(p2m_missing); | ||
| 645 | |||
| 646 | /* Reset where we want to stick the old page in. */ | ||
| 647 | topidx = p2m_top_index(set_pfn); | ||
| 648 | mididx = p2m_mid_index(set_pfn); | ||
| 649 | |||
| 650 | /* This shouldn't happen */ | ||
| 651 | if (WARN_ON(p2m_top[topidx] == p2m_mid_missing)) | ||
| 652 | early_alloc_p2m(set_pfn); | ||
| 653 | |||
| 654 | if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing)) | ||
| 655 | return false; | ||
| 656 | |||
| 657 | p2m_init(p2m); | ||
| 658 | p2m_top[topidx][mididx] = p2m; | ||
| 659 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
| 660 | mid_mfn_p[mididx] = virt_to_mfn(p2m); | ||
| 661 | |||
| 662 | return true; | ||
| 663 | } | ||
| 578 | bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) | 664 | bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) |
| 579 | { | 665 | { |
| 580 | if (unlikely(!__set_phys_to_machine(pfn, mfn))) { | 666 | if (unlikely(!__set_phys_to_machine(pfn, mfn))) { |
| 581 | if (!early_alloc_p2m(pfn)) | 667 | if (!early_alloc_p2m(pfn)) |
| 582 | return false; | 668 | return false; |
| 583 | 669 | ||
| 670 | if (early_can_reuse_p2m_middle(pfn, mfn)) | ||
| 671 | return __set_phys_to_machine(pfn, mfn); | ||
| 672 | |||
| 584 | if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/)) | 673 | if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/)) |
| 585 | return false; | 674 | return false; |
| 586 | 675 | ||
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index ead85576d54a..d11ca11d14fc 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
| @@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size) | |||
| 78 | memblock_reserve(start, size); | 78 | memblock_reserve(start, size); |
| 79 | 79 | ||
| 80 | xen_max_p2m_pfn = PFN_DOWN(start + size); | 80 | xen_max_p2m_pfn = PFN_DOWN(start + size); |
| 81 | for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) { | ||
| 82 | unsigned long mfn = pfn_to_mfn(pfn); | ||
| 83 | |||
| 84 | if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn)) | ||
| 85 | continue; | ||
| 86 | WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n", | ||
| 87 | pfn, mfn); | ||
| 81 | 88 | ||
| 82 | for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++) | ||
| 83 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | 89 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); |
| 90 | } | ||
| 84 | } | 91 | } |
| 85 | 92 | ||
| 86 | static unsigned long __init xen_do_chunk(unsigned long start, | 93 | static unsigned long __init xen_do_chunk(unsigned long start, |
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index ae8a00c39de4..45329c8c226e 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c | |||
| @@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled) | |||
| 30 | { | 30 | { |
| 31 | #ifdef CONFIG_XEN_PVHVM | 31 | #ifdef CONFIG_XEN_PVHVM |
| 32 | int cpu; | 32 | int cpu; |
| 33 | xen_hvm_resume_shared_info(); | 33 | xen_hvm_init_shared_info(); |
| 34 | xen_callback_vector(); | 34 | xen_callback_vector(); |
| 35 | xen_unplug_emulated_devices(); | 35 | xen_unplug_emulated_devices(); |
| 36 | if (xen_feature(XENFEAT_hvm_safe_pvclock)) { | 36 | if (xen_feature(XENFEAT_hvm_safe_pvclock)) { |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 1e4329e04e0f..202d4c150154 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
| @@ -41,7 +41,7 @@ void xen_enable_syscall(void); | |||
| 41 | void xen_vcpu_restore(void); | 41 | void xen_vcpu_restore(void); |
| 42 | 42 | ||
| 43 | void xen_callback_vector(void); | 43 | void xen_callback_vector(void); |
| 44 | void xen_hvm_resume_shared_info(void); | 44 | void xen_hvm_init_shared_info(void); |
| 45 | void xen_unplug_emulated_devices(void); | 45 | void xen_unplug_emulated_devices(void); |
| 46 | 46 | ||
| 47 | void __init xen_build_dynamic_phys_to_machine(void); | 47 | void __init xen_build_dynamic_phys_to_machine(void); |
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index d4c50d63acbc..97ca359ae2bd 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c | |||
| @@ -101,19 +101,6 @@ static int platform_pci_resume(struct pci_dev *pdev) | |||
| 101 | return 0; | 101 | return 0; |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | static void __devinit prepare_shared_info(void) | ||
| 105 | { | ||
| 106 | #ifdef CONFIG_KEXEC | ||
| 107 | unsigned long addr; | ||
| 108 | struct shared_info *hvm_shared_info; | ||
| 109 | |||
| 110 | addr = alloc_xen_mmio(PAGE_SIZE); | ||
| 111 | hvm_shared_info = ioremap(addr, PAGE_SIZE); | ||
| 112 | memset(hvm_shared_info, 0, PAGE_SIZE); | ||
| 113 | xen_hvm_prepare_kexec(hvm_shared_info, addr >> PAGE_SHIFT); | ||
| 114 | #endif | ||
| 115 | } | ||
| 116 | |||
| 117 | static int __devinit platform_pci_init(struct pci_dev *pdev, | 104 | static int __devinit platform_pci_init(struct pci_dev *pdev, |
| 118 | const struct pci_device_id *ent) | 105 | const struct pci_device_id *ent) |
| 119 | { | 106 | { |
| @@ -151,8 +138,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev, | |||
| 151 | platform_mmio = mmio_addr; | 138 | platform_mmio = mmio_addr; |
| 152 | platform_mmiolen = mmio_len; | 139 | platform_mmiolen = mmio_len; |
| 153 | 140 | ||
| 154 | prepare_shared_info(); | ||
| 155 | |||
| 156 | if (!xen_have_vector_callback) { | 141 | if (!xen_have_vector_callback) { |
| 157 | ret = xen_allocate_irq(pdev); | 142 | ret = xen_allocate_irq(pdev); |
| 158 | if (ret) { | 143 | if (ret) { |
diff --git a/include/xen/events.h b/include/xen/events.h index 9c641deb65d2..04399b28e821 100644 --- a/include/xen/events.h +++ b/include/xen/events.h | |||
| @@ -58,8 +58,6 @@ void notify_remote_via_irq(int irq); | |||
| 58 | 58 | ||
| 59 | void xen_irq_resume(void); | 59 | void xen_irq_resume(void); |
| 60 | 60 | ||
| 61 | void xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn); | ||
| 62 | |||
| 63 | /* Clear an irq's pending state, in preparation for polling on it */ | 61 | /* Clear an irq's pending state, in preparation for polling on it */ |
| 64 | void xen_clear_irq_pending(int irq); | 62 | void xen_clear_irq_pending(int irq); |
| 65 | void xen_set_irq_pending(int irq); | 63 | void xen_set_irq_pending(int irq); |
