Diffstat (limited to 'arch/x86/xen')

 arch/x86/xen/mmu.c   | 10 ++++------
 arch/x86/xen/p2m.c   | 18 ++++++------------
 arch/x86/xen/setup.c | 17 ++++++++++++++++-
 3 files changed, 26 insertions(+), 19 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0c376a2d9f98..832765c0fb8c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1036,10 +1036,9 @@ static void xen_pgd_pin(struct mm_struct *mm)
  */
 void xen_mm_pin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (!PagePinned(page)) {
@@ -1048,7 +1047,7 @@ void xen_mm_pin_all(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
@@ -1149,10 +1148,9 @@ static void xen_pgd_unpin(struct mm_struct *mm)
  */
 void xen_mm_unpin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (PageSavePinned(page)) {
@@ -1162,7 +1160,7 @@ void xen_mm_unpin_all(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
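
The two mmu.c hunks are the same change applied to xen_mm_pin_all() and xen_mm_unpin_all(): pgd_lock is now taken with a plain spin_lock() instead of spin_lock_irqsave(), so the on-stack flags word goes away. Dropping the irqsave variant is only correct if pgd_lock is never acquired from interrupt context, which is presumably the invariant this tree relies on. A minimal sketch of the before/after locking pattern (illustration only, not the kernel code itself):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void locked_walk_irqsafe(void)
{
	unsigned long flags;

	/* irqsave: also safe if the lock can be taken in IRQ context */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... walk a shared list ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

static void locked_walk_plain(void)
{
	/* plain lock: cheaper, but self-deadlocks if an IRQ handler
	 * can also take demo_lock on this CPU */
	spin_lock(&demo_lock);
	/* ... walk a shared list ... */
	spin_unlock(&demo_lock);
}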
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 65f21f4b3962..00fe5604c593 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -374,21 +374,15 @@ void __init xen_build_dynamic_phys_to_machine(void)
 		 * As long as the mfn_list has enough entries to completely
 		 * fill a p2m page, pointing into the array is ok. But if
 		 * not the entries beyond the last pfn will be undefined.
-		 * And guessing that the 'what-ever-there-is' does not take it
-		 * too kindly when changing it to invalid markers, a new page
-		 * is allocated, initialized and filled with the valid part.
 		 */
 		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
 			unsigned long p2midx;
-			unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-			p2m_init(p2m);
-
-			for (p2midx = 0; pfn + p2midx < max_pfn; p2midx++) {
-				p2m[p2midx] = mfn_list[pfn + p2midx];
-			}
-			p2m_top[topidx][mididx] = p2m;
-		} else
-			p2m_top[topidx][mididx] = &mfn_list[pfn];
+
+			p2midx = max_pfn % P2M_PER_PAGE;
+			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+		}
+		p2m_top[topidx][mididx] = &mfn_list[pfn];
 	}
 
 	m2p_override_init();
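
The old code allocated a fresh page via extend_brk() and copied the valid tail of mfn_list into it whenever max_pfn was not a multiple of P2M_PER_PAGE. The new code instead stamps the leftover entries of the final mfn_list page with INVALID_P2M_ENTRY in place and points p2m_top into mfn_list unconditionally, saving a page of brk space. The index arithmetic can be checked in isolation; a standalone userspace sketch with hypothetical numbers (P2M_PER_PAGE is PAGE_SIZE / sizeof(unsigned long), i.e. 512 on 64-bit x86):

#include <stdio.h>

#define P2M_PER_PAGE	512	/* 4096 / sizeof(unsigned long) */

int main(void)
{
	/* hypothetical guest: ends 100 pfns into its last p2m page */
	unsigned long max_pfn = 10 * P2M_PER_PAGE + 100;
	/* pfn of the first entry of the last, partially valid page */
	unsigned long pfn = max_pfn - (max_pfn % P2M_PER_PAGE);
	unsigned long p2midx = max_pfn % P2M_PER_PAGE;

	/* the loop in the patch runs p2midx up to P2M_PER_PAGE, i.e.
	 * it invalidates exactly the entries at and past max_pfn */
	printf("first stamped pfn:   %lu (== max_pfn: %s)\n",
	       pfn + p2midx, pfn + p2midx == max_pfn ? "yes" : "no");
	printf("entries invalidated: %lu\n",
	       (unsigned long)P2M_PER_PAGE - p2midx);
	return 0;
}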
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 54d93791ddb9..fa0269a99377 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -229,8 +229,13 @@ char * __init xen_memory_setup(void)
 	e820.nr_map = 0;
 	xen_extra_mem_start = mem_end;
 	for (i = 0; i < memmap.nr_entries; i++) {
-		unsigned long long end = map[i].addr + map[i].size;
+		unsigned long long end;
 
+		/* Guard against non-page aligned E820 entries. */
+		if (map[i].type == E820_RAM)
+			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+
+		end = map[i].addr + map[i].size;
 		if (map[i].type == E820_RAM && end > mem_end) {
 			/* RAM off the end - may be partially included */
 			u64 delta = min(map[i].size, end - mem_end);
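
The new guard in this hunk clamps every E820_RAM entry so that it ends on a page boundary: (addr + size) % PAGE_SIZE is the overhang past the last full page, and subtracting it from size pulls the end of the region down to that boundary (the start address is left untouched). A worked example with hypothetical numbers:

#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	/* hypothetical E820_RAM entry with a ragged end */
	unsigned long long addr = 0x100000;	/* 1 MiB, aligned  */
	unsigned long long size = 0x7ff0123;	/* end not aligned */

	size -= (size + addr) % PAGE_SIZE;	/* the new clamp   */

	printf("size = %#llx, end = %#llx (aligned: %s)\n",
	       size, addr + size,
	       (addr + size) % PAGE_SIZE == 0 ? "yes" : "no");
	return 0;
}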
@@ -239,6 +244,15 @@ char * __init xen_memory_setup(void)
 			end -= delta;
 
 			extra_pages += PFN_DOWN(delta);
+			/*
+			 * Set RAM below 4GB that is not for us to be unusable.
+			 * This prevents "System RAM" address space from being
+			 * used as potential resource for I/O address (happens
+			 * when 'allocate_resource' is called).
+			 */
+			if (delta &&
+				(xen_initial_domain() && end < 0x100000000ULL))
+				e820_add_region(end, delta, E820_UNUSABLE);
 		}
 
 		if (map[i].size > 0 && end > xen_extra_mem_start)
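
This hunk only matters for the initial domain: when delta pages of RAM below 4 GiB are clipped out of the domain's map, the corresponding range is re-registered as E820_UNUSABLE so that allocate_resource() never hands out that former "System RAM" range as I/O address space. Note that the inner parentheses in the condition are redundant; the test is equivalent to the flatter form:

	/* equivalent condition, redundant parentheses dropped */
	if (delta && xen_initial_domain() && end < 0x100000000ULL)
		e820_add_region(end, delta, E820_UNUSABLE);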
@@ -407,6 +421,7 @@ void __init xen_arch_setup(void)
 	boot_cpu_data.hlt_works_ok = 1;
 #endif
 	pm_idle = default_idle;
+	boot_option_idle_override = IDLE_HALT;
 
 	fiddle_vdso();
 }
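
Besides pointing pm_idle at default_idle, the last hunk records the choice in boot_option_idle_override, the same flag the idle=halt boot parameter sets. Idle-selection code of this vintage (the ACPI processor-idle driver, for instance) checks that flag and backs off when an explicit override is present; a sketch of the guard pattern, with the surrounding call site hypothetical:

	/* sketch: an idle driver declining to replace the idle loop
	 * once a boot-time override (such as IDLE_HALT) is recorded */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return 0;	/* leave pm_idle == default_idle alone */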