author | Bob Picco <bob.picco@hp.com> | 2006-06-28 12:55:43 -0400
---|---|---
committer | Tony Luck <tony.luck@intel.com> | 2006-08-03 13:13:23 -0400
commit | e44e41d0c832ebbda7311a1fe43584d844026357 |
tree | 05a8ac85029b041db8defef35bf808d431bb3528 /arch/ia64/mm |
parent | 921eea1cdf6ce7f0db88e4579474a04b1fb0fe6d |
[IA64] fix show_mem for VIRTUAL_MEM_MAP+FLATMEM
contig.c (FLATMEM) requires the same show_mem() optimization as discontig.c when VIRTUAL_MEM_MAP is in use; otherwise show_mem() walks every pfn of the sparsely populated virtual mem_map and FLATMEM hits softlockup timeouts.
This was boot tested for the following memory configurations: SPARSEMEM,
DISCONTIG+VIRTUAL_MEM_MAP, FLATMEM, FLATMEM+VIRTUAL_MEM_MAP, and
FLATMEM+VIRTUAL_MEM_MAP with the largest memory gap smaller than LARGE_GAP
(forced by using the boot parameter "mem=").
The output of "echo m > /proc/sysrq-trigger" was also evaluated after boot
for: FLATMEM, FLATMEM+VIRTUAL_MEM_MAP, DISCONTIGMEM+VIRTUAL_MEM_MAP and
SPARSEMEM.
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
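
For context, the core of the fix is a skip-ahead pattern: when show_mem() encounters an invalid pfn inside a large hole of the virtual mem_map, it jumps directly to the next mapped pfn instead of testing every pfn in the hole one by one. Below is a minimal standalone sketch of that pattern; MAX_PFN, page_is_valid() and next_valid_pfn() are made-up stand-ins for the kernel's max_mapnr, pfn_valid() and the new vmemmap_find_next_valid_pfn(), not the actual kernel code.

```c
#include <stdio.h>
#include <stdbool.h>

/* Toy address space: two small memory banks separated by one huge hole. */
#define MAX_PFN 1000000UL

static bool page_is_valid(unsigned long pfn)
{
	return pfn < 1000 || pfn >= 999000;
}

/* First valid pfn at or after 'pfn' (here: simply skip the single hole). */
static unsigned long next_valid_pfn(unsigned long pfn)
{
	return pfn < 999000 ? 999000 : pfn;
}

int main(void)
{
	unsigned long pfn, scanned = 0, valid = 0;

	for (pfn = 0; pfn < MAX_PFN; pfn++) {
		scanned++;
		if (!page_is_valid(pfn)) {
			/* Key idea of the patch: jump past the hole instead of
			 * testing every invalid pfn one by one. */
			pfn = next_valid_pfn(pfn) - 1;
			continue;
		}
		valid++;
	}
	printf("scanned %lu iterations for %lu valid pages\n", scanned, valid);
	return 0;
}
```

In this toy setup the hole spans roughly 998,000 pfns, so the loop finishes in about 2,001 iterations instead of 1,000,000; without the skip, the real FLATMEM walk over a large gap can run long enough to trip the softlockup watchdog.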
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r-- | arch/ia64/mm/contig.c | 13
-rw-r--r-- | arch/ia64/mm/discontig.c | 65
-rw-r--r-- | arch/ia64/mm/init.c | 55
3 files changed, 66 insertions, 67 deletions
```diff
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 8919fed9666a..e004143ba86b 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -27,6 +27,7 @@
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 static unsigned long num_dma_physpages;
+static unsigned long max_gap;
 #endif
 
 /**
@@ -45,9 +46,15 @@ show_mem (void)
 
         printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
         i = max_mapnr;
-        while (i-- > 0) {
-                if (!pfn_valid(i))
+        for (i = 0; i < max_mapnr; i++) {
+                if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+                        if (max_gap < LARGE_GAP)
+                                continue;
+                        i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
                         continue;
+                }
                 total++;
                 if (PageReserved(mem_map+i))
                         reserved++;
@@ -234,7 +241,6 @@ paging_init (void)
         unsigned long zones_size[MAX_NR_ZONES];
 #ifdef CONFIG_VIRTUAL_MEM_MAP
         unsigned long zholes_size[MAX_NR_ZONES];
-        unsigned long max_gap;
 #endif
 
         /* initialize mem_map[] */
@@ -266,7 +272,6 @@ paging_init (void)
                 }
         }
 
-        max_gap = 0;
         efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
         if (max_gap < LARGE_GAP) {
                 vmem_map = (struct page *) 0;
```
```diff
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 8eeb669917fa..d260bffa01ab 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-        unsigned long end_address, hole_next_pfn;
-        unsigned long stop_address;
-
-        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
-        end_address = PAGE_ALIGN(end_address);
-
-        stop_address = (unsigned long) &vmem_map[
-                pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
-        do {
-                pgd_t *pgd;
-                pud_t *pud;
-                pmd_t *pmd;
-                pte_t *pte;
-
-                pgd = pgd_offset_k(end_address);
-                if (pgd_none(*pgd)) {
-                        end_address += PGDIR_SIZE;
-                        continue;
-                }
-
-                pud = pud_offset(pgd, end_address);
-                if (pud_none(*pud)) {
-                        end_address += PUD_SIZE;
-                        continue;
-                }
-
-                pmd = pmd_offset(pud, end_address);
-                if (pmd_none(*pmd)) {
-                        end_address += PMD_SIZE;
-                        continue;
-                }
-
-                pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
-                if (pte_none(*pte)) {
-                        end_address += PAGE_SIZE;
-                        pte++;
-                        if ((end_address < stop_address) &&
-                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
-                                goto retry_pte;
-                        continue;
-                }
-                /* Found next valid vmem_map page */
-                break;
-        } while (end_address < stop_address);
-
-        end_address = min(end_address, stop_address);
-        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
-        hole_next_pfn = end_address / sizeof(struct page);
-        return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-        return i + 1;
-}
-#endif
-
 /**
  * show_mem - give short summary of memory stats
  *
@@ -625,7 +563,8 @@ void show_mem(void)
                         if (pfn_valid(pgdat->node_start_pfn + i))
                                 page = pfn_to_page(pgdat->node_start_pfn + i);
                         else {
-                                i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+                                i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+                                        i) - 1;
                                 continue;
                         }
                         if (PageReserved(page))
```
```diff
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2f50c064513c..30617ccb4f7e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+        unsigned long end_address, hole_next_pfn;
+        unsigned long stop_address;
+        pg_data_t *pgdat = NODE_DATA(node);
+
+        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+        end_address = PAGE_ALIGN(end_address);
+
+        stop_address = (unsigned long) &vmem_map[
+                pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+        do {
+                pgd_t *pgd;
+                pud_t *pud;
+                pmd_t *pmd;
+                pte_t *pte;
+
+                pgd = pgd_offset_k(end_address);
+                if (pgd_none(*pgd)) {
+                        end_address += PGDIR_SIZE;
+                        continue;
+                }
+
+                pud = pud_offset(pgd, end_address);
+                if (pud_none(*pud)) {
+                        end_address += PUD_SIZE;
+                        continue;
+                }
+
+                pmd = pmd_offset(pud, end_address);
+                if (pmd_none(*pmd)) {
+                        end_address += PMD_SIZE;
+                        continue;
+                }
+
+                pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+                if (pte_none(*pte)) {
+                        end_address += PAGE_SIZE;
+                        pte++;
+                        if ((end_address < stop_address) &&
+                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+                                goto retry_pte;
+                        continue;
+                }
+                /* Found next valid vmem_map page */
+                break;
+        } while (end_address < stop_address);
+
+        end_address = min(end_address, stop_address);
+        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+        hole_next_pfn = end_address / sizeof(struct page);
+        return hole_next_pfn - pgdat->node_start_pfn;
+}
 
 int __init
 create_mem_map_page_table (u64 start, u64 end, void *arg)
```