diff options
author | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2009-09-16 15:38:33 -0400 |
---|---|---|
committer | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2010-07-20 15:57:46 -0400 |
commit | f89e048e76da7ac0b4c89e75606ca7a3422886b1 (patch) | |
tree | c195e708de7c5d6bf34accc40584b5cefda9ae92 /arch | |
parent | 093d7b4639951ea3021a6f70d376c3ff31f4740c (diff) |
xen: make sure pages are really part of domain before freeing
Scan the set of pages we're freeing and make sure they're actually
owned by the domain before freeing. This generally won't happen on a
domU (since Xen gives us contiguous memory), but it could happen if
there are some hardware mappings passed through.
We only bother going up to the highest page Xen actually claimed to
give us, since there's definitely nothing of ours above that.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/xen/setup.c | 59 |
1 files changed, 38 insertions, 21 deletions
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index e0942630d47a..9deb6bab6c78 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -33,52 +33,69 @@ extern void xen_sysenter_target(void); | |||
33 | extern void xen_syscall_target(void); | 33 | extern void xen_syscall_target(void); |
34 | extern void xen_syscall32_target(void); | 34 | extern void xen_syscall32_target(void); |
35 | 35 | ||
36 | static unsigned long __init xen_release_chunk(phys_addr_t start_addr, phys_addr_t end_addr) | 36 | static unsigned long __init xen_release_chunk(phys_addr_t start_addr, |
37 | phys_addr_t end_addr) | ||
37 | { | 38 | { |
38 | struct xen_memory_reservation reservation = { | 39 | struct xen_memory_reservation reservation = { |
39 | .address_bits = 0, | 40 | .address_bits = 0, |
40 | .extent_order = 0, | 41 | .extent_order = 0, |
41 | .domid = DOMID_SELF | 42 | .domid = DOMID_SELF |
42 | }; | 43 | }; |
43 | unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list; | ||
44 | unsigned long start, end; | 44 | unsigned long start, end; |
45 | unsigned long len; | 45 | unsigned long len = 0; |
46 | unsigned long pfn; | 46 | unsigned long pfn; |
47 | int ret; | 47 | int ret; |
48 | 48 | ||
49 | start = PFN_UP(start_addr); | 49 | start = PFN_UP(start_addr); |
50 | end = PFN_UP(end_addr); | 50 | end = PFN_DOWN(end_addr); |
51 | 51 | ||
52 | if (end <= start) | 52 | if (end <= start) |
53 | return 0; | 53 | return 0; |
54 | 54 | ||
55 | len = end - start; | 55 | printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ", |
56 | 56 | start, end); | |
57 | set_xen_guest_handle(reservation.extent_start, &mfn_list[start]); | 57 | for(pfn = start; pfn < end; pfn++) { |
58 | reservation.nr_extents = len; | 58 | unsigned long mfn = pfn_to_mfn(pfn); |
59 | 59 | ||
60 | ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); | 60 | /* Make sure pfn exists to start with */ |
61 | WARN(ret != (end - start), "Failed to release memory %lx-%lx err=%d\n", | 61 | if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn) |
62 | start, end, ret); | 62 | continue; |
63 | 63 | ||
64 | for(pfn = start; pfn < end; pfn++) | 64 | set_xen_guest_handle(reservation.extent_start, &mfn); |
65 | set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | 65 | reservation.nr_extents = 1; |
66 | |||
67 | ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, | ||
68 | &reservation); | ||
69 | WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", | ||
70 | start, end, ret); | ||
71 | if (ret == 1) { | ||
72 | set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | ||
73 | len++; | ||
74 | } | ||
75 | } | ||
76 | printk(KERN_CONT "%ld pages freed\n", len); | ||
66 | 77 | ||
67 | return len; | 78 | return len; |
68 | } | 79 | } |
69 | 80 | ||
70 | static unsigned long __init xen_return_unused_memory(const struct e820map *e820) | 81 | static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, |
82 | const struct e820map *e820) | ||
71 | { | 83 | { |
72 | unsigned long last_end = 0; | 84 | phys_addr_t max_addr = PFN_PHYS(max_pfn); |
85 | phys_addr_t last_end = 0; | ||
73 | unsigned long released = 0; | 86 | unsigned long released = 0; |
74 | int i; | 87 | int i; |
75 | 88 | ||
76 | for (i = 0; i < e820->nr_map; i++) { | 89 | for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { |
77 | released += xen_release_chunk(last_end, e820->map[i].addr); | 90 | phys_addr_t end = e820->map[i].addr; |
91 | end = min(max_addr, end); | ||
92 | |||
93 | released += xen_release_chunk(last_end, end); | ||
78 | last_end = e820->map[i].addr + e820->map[i].size; | 94 | last_end = e820->map[i].addr + e820->map[i].size; |
79 | } | 95 | } |
80 | 96 | ||
81 | released += xen_release_chunk(last_end, PFN_PHYS(xen_start_info->nr_pages)); | 97 | if (last_end < max_addr) |
98 | released += xen_release_chunk(last_end, max_addr); | ||
82 | 99 | ||
83 | printk(KERN_INFO "released %ld pages of unused memory\n", released); | 100 | printk(KERN_INFO "released %ld pages of unused memory\n", released); |
84 | return released; | 101 | return released; |
@@ -118,7 +135,7 @@ char * __init xen_memory_setup(void) | |||
118 | 135 | ||
119 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); | 136 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); |
120 | 137 | ||
121 | xen_return_unused_memory(&e820); | 138 | xen_return_unused_memory(xen_start_info->nr_pages, &e820); |
122 | 139 | ||
123 | return "Xen"; | 140 | return "Xen"; |
124 | } | 141 | } |