Diffstat (limited to 'arch/ia64/mm')
 arch/ia64/mm/contig.c    | 113
 arch/ia64/mm/discontig.c |  50
 arch/ia64/mm/init.c      |  64
 3 files changed, 126 insertions(+), 101 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551231b9..44ce5ed9444c 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -30,71 +30,73 @@ static unsigned long max_gap;
 #endif
 
 /**
- * show_mem - display a memory statistics summary
+ * show_mem - give short summary of memory stats
  *
- * Just walks the pages in the system and describes where they're allocated.
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
  */
-void
-show_mem (void)
+void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	int i, total_reserved = 0;
+	int total_shared = 0, total_cached = 0;
+	unsigned long total_present = 0;
+	pg_data_t *pgdat;
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-
 	printk(KERN_INFO "Free swap:       %6ldkB\n",
 	       nr_swap_pages<<(PAGE_SHIFT-10));
-	i = max_mapnr;
-	for (i = 0; i < max_mapnr; i++) {
-		if (!pfn_valid(i)) {
+	printk(KERN_INFO "Node memory in pages:\n");
+	for_each_online_pgdat(pgdat) {
+		unsigned long present;
+		unsigned long flags;
+		int shared = 0, cached = 0, reserved = 0;
+
+		pgdat_resize_lock(pgdat, &flags);
+		present = pgdat->node_present_pages;
+		for(i = 0; i < pgdat->node_spanned_pages; i++) {
+			struct page *page;
+			if (pfn_valid(pgdat->node_start_pfn + i))
+				page = pfn_to_page(pgdat->node_start_pfn + i);
+			else {
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-			if (max_gap < LARGE_GAP)
-				continue;
-			i = vmemmap_find_next_valid_pfn(0, i) - 1;
+				if (max_gap < LARGE_GAP)
+					continue;
 #endif
-			continue;
+				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+					i) - 1;
+				continue;
+			}
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page)-1;
 		}
-		total++;
-		if (PageReserved(mem_map+i))
-			reserved++;
-		else if (PageSwapCache(mem_map+i))
-			cached++;
-		else if (page_count(mem_map + i))
-			shared += page_count(mem_map + i) - 1;
+		pgdat_resize_unlock(pgdat, &flags);
+		total_present += present;
+		total_reserved += reserved;
+		total_cached += cached;
+		total_shared += shared;
+		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
+		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+		       present, reserved, shared, cached);
 	}
-	printk(KERN_INFO "%d pages of RAM\n", total);
-	printk(KERN_INFO "%d reserved pages\n", reserved);
-	printk(KERN_INFO "%d pages shared\n", shared);
-	printk(KERN_INFO "%d pages swap cached\n", cached);
-	printk(KERN_INFO "%ld pages in page table cache\n",
+	printk(KERN_INFO "%ld pages of RAM\n", total_present);
+	printk(KERN_INFO "%d reserved pages\n", total_reserved);
+	printk(KERN_INFO "%d pages shared\n", total_shared);
+	printk(KERN_INFO "%d pages swap cached\n", total_cached);
+	printk(KERN_INFO "Total of %ld pages in page table cache\n",
 	       pgtable_quicklist_total_size());
+	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
+
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
 /**
- * find_max_pfn - adjust the maximum page number callback
- * @start: start of range
- * @end: end of range
- * @arg: address of pointer to global max_pfn variable
- *
- * Passed as a callback function to efi_memmap_walk() to determine the highest
- * available page frame number in the system.
- */
-int
-find_max_pfn (unsigned long start, unsigned long end, void *arg)
-{
-	unsigned long *max_pfnp = arg, pfn;
-
-	pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
-	if (pfn > *max_pfnp)
-		*max_pfnp = pfn;
-	return 0;
-}
-
-/**
  * find_bootmap_location - callback to find a memory area for the bootmap
  * @start: start of region
  * @end: end of region
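
A note on the rewrite above: show_mem() now walks memory per node instead of scanning a flat 0..max_mapnr range, holding the pgdat resize lock while each node's spanned pages are examined. Below is a minimal sketch of that pattern, using only interfaces visible in this hunk; walk_node_pages() is a hypothetical name and the page-classification body is elided:

	static void walk_node_pages(void)
	{
		pg_data_t *pgdat;
		unsigned long i, flags;

		for_each_online_pgdat(pgdat) {
			/* The resize lock keeps the span stable during the walk. */
			pgdat_resize_lock(pgdat, &flags);
			for (i = 0; i < pgdat->node_spanned_pages; i++) {
				unsigned long pfn = pgdat->node_start_pfn + i;

				if (!pfn_valid(pfn))
					continue;	/* hole in the node's span */
				/* ... classify pfn_to_page(pfn) here ... */
			}
			pgdat_resize_unlock(pgdat, &flags);
		}
	}

The real function additionally batches per-node counters and prints one summary line per node, as the hunk shows.
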
@@ -155,9 +157,10 @@ find_memory (void)
 	reserve_memory();
 
 	/* first find highest page frame number */
-	max_pfn = 0;
-	efi_memmap_walk(find_max_pfn, &max_pfn);
-
+	min_low_pfn = ~0UL;
+	max_low_pfn = 0;
+	efi_memmap_walk(find_max_min_low_pfn, NULL);
+	max_pfn = max_low_pfn;
 	/* how many bytes to cover all the pages */
 	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
 
@@ -167,7 +170,8 @@ find_memory (void)
 	if (bootmap_start == ~0UL)
 		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
 
-	bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
+	bootmap_size = init_bootmem_node(NODE_DATA(0),
+			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);
 
 	/* Free all available memory, then mark bootmem-map as being in use. */
 	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
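
On the init_bootmem_node() switch above: init_bootmem() registers the boot memory map against the single node-0 bootmem_data with an implicit starting PFN of 0, while init_bootmem_node() names the node explicitly. A condensed before/after, with both calls taken from this hunk:

	/* before: implicit node-0 bootmem_data, start PFN assumed */
	bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);

	/* after: same map, explicit pg_data_t and start PFN */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

The arguments to init_bootmem_node() are the node's pg_data_t, the PFN holding the bitmap, and the node's first and last PFNs.
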
@@ -175,11 +179,6 @@ find_memory (void)
 
 	find_initrd();
 
-#ifdef CONFIG_CRASH_DUMP
-	/* If we are doing a crash dump, we still need to know the real mem
-	 * size before original memory map is * reset. */
-	saved_max_pfn = max_pfn;
-#endif
 }
 
 #ifdef CONFIG_SMP
@@ -237,9 +236,11 @@ paging_init (void)
 	num_physpages = 0;
 	efi_memmap_walk(count_pages, &num_physpages);
 
-	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
+	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 	max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb1b49d..872da7a2accd 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -37,7 +37,9 @@ struct early_node_data {
 	unsigned long pernode_size;
 	struct bootmem_data bootmem_data;
 	unsigned long num_physpages;
+#ifdef CONFIG_ZONE_DMA
 	unsigned long num_dma_physpages;
+#endif
 	unsigned long min_pfn;
 	unsigned long max_pfn;
 };
@@ -86,9 +88,6 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
 		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
 	}
 
-	min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
-	max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);
-
 	return 0;
 }
 
@@ -412,37 +411,6 @@ static void __init memory_less_nodes(void)
 	return;
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
-				  void *arg)
-{
-	int nid;
-
-	start = __pa(start) >> PAGE_SHIFT;
-	end = __pa(end) >> PAGE_SHIFT;
-	nid = early_pfn_to_nid(start);
-	memory_present(nid, start, end);
-
-	return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
-	efi_memmap_walk(register_sparse_mem, NULL);
-	sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
 /**
  * find_memory - walk the EFI memory map and setup the bootmem allocator
  *
@@ -467,12 +435,16 @@ void __init find_memory(void)
 	/* These actually end up getting called by call_pernode_memory() */
 	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
 	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
+	efi_memmap_walk(find_max_min_low_pfn, NULL);
 
 	for_each_online_node(node)
 		if (mem_data[node].bootmem_data.node_low_pfn) {
 			node_clear(node, memory_less_mask);
 			mem_data[node].min_pfn = ~0UL;
 		}
+
+	efi_memmap_walk(register_active_ranges, NULL);
+
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
 	 * what the bootmem allocator expects
@@ -654,11 +626,12 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 {
 	unsigned long end = start + len;
 
-	add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
 	mem_data[node].num_physpages += len >> PAGE_SHIFT;
+#ifdef CONFIG_ZONE_DMA
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
 			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
+#endif
 	start = GRANULEROUNDDOWN(start);
 	start = ORDERROUNDDOWN(start);
 	end = GRANULEROUNDUP(end);
@@ -686,10 +659,11 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-	arch_sparse_init();
-
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
+	sparse_init();
+
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
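
Our reading of the two discontig.c changes above: once find_memory() registers active ranges (with find_max_min_low_pfn() maintaining the global PFN bounds), the private register_sparse_mem()/arch_sparse_init() EFI walk removed earlier becomes redundant, because the generic helper can replay the recorded ranges into memory_present(). A condensed sketch of the resulting order, all names as they appear in this diff:

	/* find_memory(): record what exists */
	efi_memmap_walk(find_max_min_low_pfn, NULL);	/* global pfn bounds */
	efi_memmap_walk(register_active_ranges, NULL);	/* feeds add_active_range() */

	/* paging_init(): hand the ranges to SPARSEMEM, then build sections */
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
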
@@ -710,7 +684,9 @@ void __init paging_init(void)
 	}
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
 	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 	free_area_init_nodes(max_zone_pfns);
 
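
Both paging_init() variants now share one zone-limit idiom: clear the array, fill ZONE_DMA only when the kernel is configured with such a zone, and let the generic allocator size the rest. A sketch under the symbols used in this diff; set_zone_limits() is a hypothetical wrapper (contig.c caps ZONE_NORMAL at max_low_pfn while discontig.c uses max_pfn):

	static void __init set_zone_limits(unsigned long top_pfn)
	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	#ifdef CONFIG_ZONE_DMA
		/* Only kernels configured with a DMA zone fill this slot. */
		max_zone_pfns[ZONE_DMA] =
			virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	#endif
		max_zone_pfns[ZONE_NORMAL] = top_pfn;
		free_area_init_nodes(max_zone_pfns);
	}
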
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 07d82cd7cbdd..5b70241741b4 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/a.out.h>
 #include <asm/dma.h>
@@ -67,7 +68,7 @@ max_pgt_pages(void)
 #ifndef CONFIG_NUMA
 	node_free_pages = nr_free_pages();
 #else
-	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
+	node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
 #endif
 	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
 	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
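
The new dma_mark_clean() above only marks pages that lie entirely inside [addr, addr + size): PAGE_ALIGN() rounds the start up to the first full page and the loop stops before a page would overrun the end. A self-contained illustration of that arithmetic; the constants are stand-ins for the kernel's (16 KiB pages are one common ia64 configuration):

	#include <stdio.h>

	#define PAGE_SIZE	0x4000UL		/* 16 KiB stand-in */
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		unsigned long addr = 0x6000, size = 0xa000;	/* ends at 0x10000 */
		unsigned long pg_addr = PAGE_ALIGN(addr);	/* 0x8000 */
		unsigned long end = addr + size;

		while (pg_addr + PAGE_SIZE <= end) {	/* complete pages only */
			printf("would mark page at 0x%lx clean\n", pg_addr);
			pg_addr += PAGE_SIZE;
		}
		return 0;
	}

Here the partial page at 0x4000 is skipped, while the fully covered pages at 0x8000 and 0xc000 are marked.
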
@@ -135,7 +155,7 @@ ia64_set_rbs_bot (void)
 
 	if (stack_size > MAX_USER_STACK_SIZE)
 		stack_size = MAX_USER_STACK_SIZE;
-	current->thread.rbs_bot = STACK_TOP - stack_size;
+	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
 }
 
 /*
@@ -156,9 +176,8 @@ ia64_init_addr_space (void)
 	 * the problem.  When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -175,9 +194,8 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
-			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
 			vma->vm_end = PAGE_SIZE;
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
@@ -586,13 +604,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
 	return 0;
 }
 
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
 int __init
 register_active_ranges(u64 start, u64 end, void *arg)
 {
-	add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+	int nid = paddr_to_nid(__pa(start));
+
+	if (nid < 0)
+		nid = 0;
+#ifdef CONFIG_KEXEC
+	if (start > crashk_res.start && start < crashk_res.end)
+		start = crashk_res.end;
+	if (end > crashk_res.start && end < crashk_res.end)
+		end = crashk_res.start;
+#endif
+
+	if (start < end)
+		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+			__pa(end) >> PAGE_SHIFT);
 	return 0;
 }
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 static int __init
 count_reserved_pages (u64 start, u64 end, void *arg)
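
The CONFIG_KEXEC clamping above keeps the crash-kernel reservation out of the registered ranges: a start inside the window is pushed past it, an end inside it is pulled back, and a range swallowed whole then fails the start < end test. A self-contained illustration with made-up addresses (struct resource_range is a stand-in for the kernel's struct resource):

	#include <stdio.h>

	struct resource_range { unsigned long start, end; };

	int main(void)
	{
		struct resource_range crashk = { 0x04000000, 0x08000000 };
		unsigned long start = 0x05000000, end = 0x0a000000;

		if (start > crashk.start && start < crashk.end)
			start = crashk.end;	/* begins inside: skip past it */
		if (end > crashk.start && end < crashk.end)
			end = crashk.start;	/* ends inside: cut it short */

		if (start < end)
			printf("register [0x%08lx, 0x%08lx)\n", start, end);
		else
			printf("range fully inside the window; dropped\n");
		return 0;
	}

With these values the surviving range is [0x08000000, 0x0a000000).
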
@@ -607,6 +639,22 @@ count_reserved_pages (u64 start, u64 end, void *arg)
 	return 0;
 }
 
+int
+find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
+{
+	unsigned long pfn_start, pfn_end;
+#ifdef CONFIG_FLATMEM
+	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
+	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
+#else
+	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
+	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
+#endif
+	min_low_pfn = min(min_low_pfn, pfn_start);
+	max_low_pfn = max(max_low_pfn, pfn_end);
+	return 0;
+}
+
 /*
  * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
  * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
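
find_max_min_low_pfn(), added above, widens min_low_pfn/max_low_pfn differently per memory model: FLATMEM rounds to page boundaries, the discontig/sparse path to whole granules. A small self-contained comparison; GRANULE_SIZE stands in for IA64_GRANULE_SIZE (16 MiB is a common setting) and the page size is assumed:

	#include <stdio.h>

	#define PAGE_SIZE	0x4000UL	/* 16 KiB, assumed */
	#define GRANULE_SIZE	0x1000000UL	/* 16 MiB stand-in */
	#define ROUND_DOWN(a, s)	((a) & ~((s) - 1))
	#define ROUND_UP(a, s)		(((a) + (s) - 1) & ~((s) - 1))

	int main(void)
	{
		unsigned long pa_start = 0x04123000, pa_end = 0x0a456000;

		printf("flat:    pfn %lu..%lu\n",
		       ROUND_UP(pa_start, PAGE_SIZE) / PAGE_SIZE,
		       ROUND_UP(pa_end - 1, PAGE_SIZE) / PAGE_SIZE);
		printf("granule: pfn %lu..%lu\n",
		       ROUND_DOWN(pa_start, GRANULE_SIZE) / PAGE_SIZE,
		       ROUND_UP(pa_end - 1, GRANULE_SIZE) / PAGE_SIZE);
		return 0;
	}

The granule form pulls the lower bound down and pushes the upper bound up, so every granule that contains memory falls inside the tracked window.
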