diff options
author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-05-26 18:31:27 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-27 04:11:38 -0400 |
commit | 0e91398f2a5d4eb6b07df8115917d0d1cf3e9b58 (patch) | |
tree | c6a3b31b7bcbbfb55bb2304d8651abdd28cdad54 /arch/x86/xen/mmu.c | |
parent | 7d88d32a4670af583c896e5ecd3929b78538ca62 (diff) |
xen: implement save/restore
This patch implements Xen save/restore and migration.
Saving is triggered via xenbus, which is polled in
drivers/xen/manage.c. When a suspend request comes in, the kernel
prepares itself for saving by:
1 - Freeze all processes. This is primarily to prevent any
partially-completed pagetable updates from confusing the suspend
process. If CONFIG_PREEMPT isn't defined, then this isn't necessary.
2 - Suspend xenbus and other devices
3 - Stop_machine, to make sure all the other vcpus are quiescent. The
Xen tools require the domain to run its save off vcpu0.
4 - Within the stop_machine state, it pins any unpinned pgds (under
construction or destruction), canonicalizes various other
pieces of state (mostly converting mfns to pfns), and finally
5 - Suspend the domain
Restore reverses the steps used to save the domain, ending when all
the frozen processes are thawed.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r-- | arch/x86/xen/mmu.c | 46 |
1 files changed, 46 insertions, 0 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 4740cda36563..e95955968ba3 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -560,6 +560,29 @@ void xen_pgd_pin(pgd_t *pgd) | |||
560 | xen_mc_issue(0); | 560 | xen_mc_issue(0); |
561 | } | 561 | } |
562 | 562 | ||
563 | /* | ||
564 | * On save, we need to pin all pagetables to make sure they get their | ||
565 | * mfns turned into pfns. Search the list for any unpinned pgds and pin | ||
566 | * them (unpinned pgds are not currently in use, probably because the | ||
567 | * process is under construction or destruction). | ||
568 | */ | ||
569 | void xen_mm_pin_all(void) | ||
570 | { | ||
571 | unsigned long flags; | ||
572 | struct page *page; | ||
573 | |||
574 | spin_lock_irqsave(&pgd_lock, flags); | ||
575 | |||
576 | list_for_each_entry(page, &pgd_list, lru) { | ||
577 | if (!PagePinned(page)) { | ||
578 | xen_pgd_pin((pgd_t *)page_address(page)); | ||
579 | SetPageSavePinned(page); | ||
580 | } | ||
581 | } | ||
582 | |||
583 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
584 | } | ||
585 | |||
563 | /* The init_mm pagetable is really pinned as soon as its created, but | 586 | /* The init_mm pagetable is really pinned as soon as its created, but |
564 | that's before we have page structures to store the bits. So do all | 587 | that's before we have page structures to store the bits. So do all |
565 | the book-keeping now. */ | 588 | the book-keeping now. */ |
@@ -617,6 +640,29 @@ static void xen_pgd_unpin(pgd_t *pgd) | |||
617 | xen_mc_issue(0); | 640 | xen_mc_issue(0); |
618 | } | 641 | } |
619 | 642 | ||
643 | /* | ||
644 | * On resume, undo any pinning done at save, so that the rest of the | ||
645 | * kernel doesn't see any unexpected pinned pagetables. | ||
646 | */ | ||
647 | void xen_mm_unpin_all(void) | ||
648 | { | ||
649 | unsigned long flags; | ||
650 | struct page *page; | ||
651 | |||
652 | spin_lock_irqsave(&pgd_lock, flags); | ||
653 | |||
654 | list_for_each_entry(page, &pgd_list, lru) { | ||
655 | if (PageSavePinned(page)) { | ||
656 | BUG_ON(!PagePinned(page)); | ||
657 | printk("unpinning pinned %p\n", page_address(page)); | ||
658 | xen_pgd_unpin((pgd_t *)page_address(page)); | ||
659 | ClearPageSavePinned(page); | ||
660 | } | ||
661 | } | ||
662 | |||
663 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
664 | } | ||
665 | |||
620 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) | 666 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |
621 | { | 667 | { |
622 | spin_lock(&next->page_table_lock); | 668 | spin_lock(&next->page_table_lock); |