author	Juergen Gross <jgross@suse.com>	2014-11-28 05:53:52 -0500
committer	David Vrabel <david.vrabel@citrix.com>	2014-12-04 09:08:42 -0500
commit	7108c9ce8f6e59f775b0c8250dba52b569b6cba2 (patch)
tree	d8fd536589c186c920df0496a5d9d5b54f826eac
parent	820c4db2be4ec179210b5c69103a5b2858513e8a (diff)
xen: use common page allocation function in p2m.c
In arch/x86/xen/p2m.c three different allocation functions are used to obtain a memory page: extend_brk(), alloc_bootmem_align() or __get_free_page(). Which of these is used depends on how far the system has progressed through boot.

Introduce a common allocation routine that dynamically selects which of these to call based on the boot progress. This allows initialization steps to be moved around without having to change the allocation calls.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
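The core of the change is the new alloc_p2m_page() helper added to p2m.c (shown here annotated for reference; the comments are explanatory additions, the code is as in the patch below). It tries the three allocators in boot order; use_brk is cleared once xen_revector_p2m_tree() has run:

static void * __ref alloc_p2m_page(void)
{
	/* Very early boot: take the page from the brk area reserved via RESERVE_BRK(). */
	if (unlikely(use_brk))
		return extend_brk(PAGE_SIZE, PAGE_SIZE);

	/* brk no longer in use but slab not yet up: fall back to the bootmem allocator. */
	if (unlikely(!slab_is_available()))
		return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);

	/* Normal runtime: plain page allocation. */
	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}

All former call sites of extend_brk(), alloc_bootmem_align() and __get_free_page() in p2m.c now go through this single helper, as the hunks below show.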
-rw-r--r--	arch/x86/xen/mmu.c	2
-rw-r--r--	arch/x86/xen/p2m.c	57
2 files changed, 37 insertions, 22 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a8a1a3d08d4d..b995b871da02 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1219,6 +1219,8 @@ static void __init xen_pagetable_init(void)
 	paging_init();
 #ifdef CONFIG_X86_64
 	xen_pagetable_p2m_copy();
+#else
+	xen_revector_p2m_tree();
 #endif
 	/* Allocate and initialize top and mid mfn levels for p2m structure */
 	xen_build_mfn_list_list();
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 2d8b9086c3ec..fa53dc2bc589 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -164,6 +164,7 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/bootmem.h>
+#include <linux/slab.h>
 
 #include <asm/cache.h>
 #include <asm/setup.h>
@@ -204,6 +205,8 @@ RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER
  */
 RESERVE_BRK(p2m_identity_remap, PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES);
 
+static int use_brk = 1;
+
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
 	BUG_ON(pfn >= MAX_P2M_PFN);
@@ -268,6 +271,24 @@ static void p2m_init(unsigned long *p2m)
 		p2m[i] = INVALID_P2M_ENTRY;
 }
 
+static void * __ref alloc_p2m_page(void)
+{
+	if (unlikely(use_brk))
+		return extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+	if (unlikely(!slab_is_available()))
+		return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+
+	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+/* Only to be called in case of a race for a page just allocated! */
+static void free_p2m_page(void *p)
+{
+	BUG_ON(!slab_is_available());
+	free_page((unsigned long)p);
+}
+
 /*
  * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
  *
@@ -287,13 +308,13 @@ void __ref xen_build_mfn_list_list(void)
 
 	/* Pre-initialize p2m_top_mfn to be completely missing */
 	if (p2m_top_mfn == NULL) {
-		p2m_mid_missing_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+		p2m_mid_missing_mfn = alloc_p2m_page();
 		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
 
-		p2m_top_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+		p2m_top_mfn_p = alloc_p2m_page();
 		p2m_top_mfn_p_init(p2m_top_mfn_p);
 
-		p2m_top_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+		p2m_top_mfn = alloc_p2m_page();
 		p2m_top_mfn_init(p2m_top_mfn);
 	} else {
 		/* Reinitialise, mfn's all change after migration */
@@ -327,7 +348,7 @@ void __ref xen_build_mfn_list_list(void)
 			 * missing parts of the mfn tree after
 			 * runtime.
 			 */
-			mid_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+			mid_mfn_p = alloc_p2m_page();
 			p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
 
 			p2m_top_mfn_p[topidx] = mid_mfn_p;
@@ -364,17 +385,17 @@ void __init xen_build_dynamic_phys_to_machine(void)
 	max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
 	xen_max_p2m_pfn = max_pfn;
 
-	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_missing = alloc_p2m_page();
 	p2m_init(p2m_missing);
-	p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_identity = alloc_p2m_page();
 	p2m_init(p2m_identity);
 
-	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_mid_missing = alloc_p2m_page();
 	p2m_mid_init(p2m_mid_missing, p2m_missing);
-	p2m_mid_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_mid_identity = alloc_p2m_page();
 	p2m_mid_init(p2m_mid_identity, p2m_identity);
 
-	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_top = alloc_p2m_page();
 	p2m_top_init(p2m_top);
 
 	/*
@@ -387,7 +408,7 @@ void __init xen_build_dynamic_phys_to_machine(void)
 		unsigned mididx = p2m_mid_index(pfn);
 
 		if (p2m_top[topidx] == p2m_mid_missing) {
-			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+			unsigned long **mid = alloc_p2m_page();
 			p2m_mid_init(mid, p2m_missing);
 
 			p2m_top[topidx] = mid;
@@ -420,6 +441,7 @@ unsigned long __init xen_revector_p2m_tree(void)
 	unsigned long *mfn_list = NULL;
 	unsigned long size;
 
+	use_brk = 0;
 	va_start = xen_start_info->mfn_list;
 	/*We copy in increments of P2M_PER_PAGE * sizeof(unsigned long),
 	 * so make sure it is rounded up to that */
@@ -484,6 +506,7 @@ unsigned long __init xen_revector_p2m_tree(void)
 #else
 unsigned long __init xen_revector_p2m_tree(void)
 {
+	use_brk = 0;
 	return 0;
 }
 #endif
@@ -510,16 +533,6 @@ unsigned long get_phys_to_machine(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(get_phys_to_machine);
 
-static void *alloc_p2m_page(void)
-{
-	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
-}
-
-static void free_p2m_page(void *p)
-{
-	free_page((unsigned long)p);
-}
-
 /*
  * Fully allocate the p2m structure for a given pfn. We need to check
  * that both the top and mid levels are allocated, and make sure the
@@ -624,7 +637,7 @@ static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary)
 		return false;
 
 	/* Boundary cross-over for the edges: */
-	p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m = alloc_p2m_page();
 
 	p2m_init(p2m);
 
@@ -640,7 +653,7 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn)
 
 	mid = p2m_top[topidx];
 	if (mid == p2m_mid_missing) {
-		mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		mid = alloc_p2m_page();
 
 		p2m_mid_init(mid, p2m_missing);
 