Diffstat (limited to 'arch/ia64/mm')
 -rw-r--r--  arch/ia64/mm/Makefile    |  5
 -rw-r--r--  arch/ia64/mm/contig.c    |  4
 -rw-r--r--  arch/ia64/mm/discontig.c | 43
 -rw-r--r--  arch/ia64/mm/init.c      |  2
 -rw-r--r--  arch/ia64/mm/numa.c      | 24
 -rw-r--r--  arch/ia64/mm/tlb.c       | 21
 6 files changed, 82 insertions, 17 deletions
diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile
index 7078f67887ec..d78d20f0a0f0 100644
--- a/arch/ia64/mm/Makefile
+++ b/arch/ia64/mm/Makefile
@@ -7,6 +7,5 @@ obj-y := init.o fault.o tlb.o extable.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_NUMA)         += numa.o
 obj-$(CONFIG_DISCONTIGMEM) += discontig.o
-ifndef CONFIG_DISCONTIGMEM
-obj-y += contig.o
-endif
+obj-$(CONFIG_SPARSEMEM)    += discontig.o
+obj-$(CONFIG_FLATMEM)      += contig.o
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 91a055f5731f..acaaec4e4681 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -269,7 +269,7 @@ paging_init (void)
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
-		free_area_init_node(0, &contig_page_data, zones_size, 0,
+		free_area_init_node(0, NODE_DATA(0), zones_size, 0,
 				    zholes_size);
 	} else {
 		unsigned long map_size;
@@ -282,7 +282,7 @@ paging_init (void)
 		efi_memmap_walk(create_mem_map_page_table, NULL);
 
 		NODE_DATA(0)->node_mem_map = vmem_map;
-		free_area_init_node(0, &contig_page_data, zones_size,
+		free_area_init_node(0, NODE_DATA(0), zones_size,
 				    0, zholes_size);
 
 		printk("Virtual mem_map starts at 0x%p\n", mem_map);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b5c90e548195..a3788fb84809 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -421,6 +421,37 @@ static void __init memory_less_nodes(void)
 	return;
 }
 
+#ifdef CONFIG_SPARSEMEM
+/**
+ * register_sparse_mem - notify SPARSEMEM that this memory range exists.
+ * @start: physical start of range
+ * @end: physical end of range
+ * @arg: unused
+ *
+ * Simply calls SPARSEMEM to register memory section(s).
+ */
+static int __init register_sparse_mem(unsigned long start, unsigned long end,
+				      void *arg)
+{
+	int nid;
+
+	start = __pa(start) >> PAGE_SHIFT;
+	end = __pa(end) >> PAGE_SHIFT;
+	nid = early_pfn_to_nid(start);
+	memory_present(nid, start, end);
+
+	return 0;
+}
+
+static void __init arch_sparse_init(void)
+{
+	efi_memmap_walk(register_sparse_mem, NULL);
+	sparse_init();
+}
+#else
+#define arch_sparse_init() do {} while (0)
+#endif
+
 /**
  * find_memory - walk the EFI memory map and setup the bootmem allocator
  *
@@ -528,8 +559,10 @@ void show_mem(void)
 		int shared = 0, cached = 0, reserved = 0;
 		printk("Node ID: %d\n", pgdat->node_id);
 		for(i = 0; i < pgdat->node_spanned_pages; i++) {
-			struct page *page = pgdat_page_nr(pgdat, i);
-			if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
+			struct page *page;
+			if (pfn_valid(pgdat->node_start_pfn + i))
+				page = pfn_to_page(pgdat->node_start_pfn + i);
+			else
 				continue;
 			if (PageReserved(page))
 				reserved++;
@@ -648,12 +681,16 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
+	arch_sparse_init();
+
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
 	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
 	vmem_map = (struct page *) vmalloc_end;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
+#endif
 
 	for_each_online_node(node) {
 		memset(zones_size, 0, sizeof(zones_size));
@@ -690,7 +727,9 @@ void __init paging_init(void)
 
 		pfn_offset = mem_data[node].min_pfn;
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
 		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
+#endif
 		free_area_init_node(node, NODE_DATA(node), zones_size,
 				    pfn_offset, zholes_size);
 	}
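
A note on the SPARSEMEM hook added above: arch_sparse_init() does one extra
walk of the EFI memory map, and register_sparse_mem() converts each usable
range to page frame numbers and reports it to the core VM via
memory_present(), so sparse_init() only instantiates mem_map sections that
actually exist. The standalone sketch below illustrates the idea of marking
sections present; the section size, the range table, and the flat
section_present[] array standing in for the kernel's per-section records are
all invented here, and nid handling is omitted:

    #include <stdio.h>

    #define SECTION_SHIFT   28      /* assumed: 256 MB sections */
    #define MAX_SECTIONS    64

    /* stand-in for the kernel's per-section bookkeeping */
    static unsigned char section_present[MAX_SECTIONS];

    /* simplified memory_present(): no nid, physical addresses not pfns */
    static void memory_present(unsigned long long start, unsigned long long end)
    {
            unsigned long long s;

            for (s = start >> SECTION_SHIFT; s <= (end - 1) >> SECTION_SHIFT; s++)
                    section_present[s] = 1;
    }

    int main(void)
    {
            /* two discontiguous ranges, as an EFI memmap walk might report */
            static const struct { unsigned long long start, end; } ranges[] = {
                    { 0x000000000ULL, 0x040000000ULL },     /* 0 GB .. 1 GB */
                    { 0x100000000ULL, 0x140000000ULL },     /* 4 GB .. 5 GB */
            };
            unsigned int i;

            for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
                    memory_present(ranges[i].start, ranges[i].end);

            for (i = 0; i < MAX_SECTIONS; i++)
                    if (section_present[i])
                            printf("section %u present\n", i);
            return 0;
    }

With 256 MB sections, the two ranges mark sections 0-3 and 16-19 present,
and the 3 GB hole between them costs no mem_map memory at all.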
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1281c609ee98..98246acd4991 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -593,7 +593,7 @@ mem_init (void)
 	platform_dma_init();
 #endif
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 	if (!mem_map)
 		BUG();
 	max_mapnr = max_low_pfn;
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 77118bbf3d8b..4e5c8b36ad93 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -47,3 +47,27 @@ paddr_to_nid(unsigned long paddr)
 
 	return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
 }
+
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
+/*
+ * Because of holes evaluate on section limits.
+ * If the section of memory exists, then return the node where the section
+ * resides.  Otherwise return node 0 as the default.  This is used by
+ * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
+ * the section resides.
+ */
+int early_pfn_to_nid(unsigned long pfn)
+{
+	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
+
+	for (i = 0; i < num_node_memblks; i++) {
+		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
+		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
+			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
+		if (section >= ssec && section < esec)
+			return node_memblk[i].nid;
+	}
+
+	return 0;
+}
+#endif
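
The early_pfn_to_nid() added here is what register_sparse_mem() in
discontig.c calls before memory_present(); it works at section granularity
precisely because SPARSEMEM allocates its bookkeeping per section, so any pfn
inside a section can stand for the whole section. Below is a rough userspace
rendering of the same lookup; the shift values and two-node layout are
invented for illustration (ia64 commonly uses 16 KB pages, but none of these
constants come from the patch):

    #include <stdio.h>

    #define PAGE_SHIFT          14  /* assumed: 16 KB pages */
    #define PA_SECTION_SHIFT    28  /* assumed: 256 MB sections */
    #define PFN_SECTION_SHIFT   (PA_SECTION_SHIFT - PAGE_SHIFT)

    /* invented two-node layout with a hole between 1 GB and 4 GB */
    static struct {
            unsigned long long start_paddr, size;
            int nid;
    } node_memblk[] = {
            { 0x000000000ULL, 0x40000000ULL, 0 },   /* 1 GB on node 0 */
            { 0x100000000ULL, 0x40000000ULL, 1 },   /* 1 GB on node 1 */
    };

    static int early_pfn_to_nid(unsigned long long pfn)
    {
            unsigned long long section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
            unsigned int i;

            for (i = 0; i < sizeof(node_memblk) / sizeof(node_memblk[0]); i++) {
                    ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
                    esec = (node_memblk[i].start_paddr + node_memblk[i].size +
                            ((1ULL << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
                    if (section >= ssec && section < esec)
                            return node_memblk[i].nid;
            }
            return 0;       /* default to node 0, as in the patch */
    }

    int main(void)
    {
            printf("pfn 0x1000  -> node %d\n", early_pfn_to_nid(0x1000ULL));  /* 64 MB */
            printf("pfn 0x40000 -> node %d\n", early_pfn_to_nid(0x40000ULL)); /* 4 GB  */
            return 0;
    }

Rounding each block's end up to a section boundary, as the kernel loop does,
means a block that merely touches a section claims it, which is what the
"evaluate on section limits" comment is getting at.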
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 464557e4ed82..c93e0f2b5fea 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -77,19 +77,25 @@ wrap_mmu_context (struct mm_struct *mm)
 	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
 	{
 		int cpu = get_cpu(); /* prevent preemption/migration */
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_online(i) && (i != cpu))
+		for_each_online_cpu(i) {
+			if (i != cpu)
 				per_cpu(ia64_need_tlb_flush, i) = 1;
+		}
 		put_cpu();
 	}
 	local_flush_tlb_all();
 }
 
 void
-ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
+ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long nbits)
 {
 	static DEFINE_SPINLOCK(ptcg_lock);
 
+	if (mm != current->active_mm) {
+		flush_tlb_all();
+		return;
+	}
+
 	/* HW requires global serialization of ptc.ga.  */
 	spin_lock(&ptcg_lock);
 	{
@@ -135,15 +141,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
 	unsigned long size = end - start;
 	unsigned long nbits;
 
+#ifndef CONFIG_SMP
 	if (mm != current->active_mm) {
-		/* this does happen, but perhaps it's not worth optimizing for? */
-#ifdef CONFIG_SMP
-		flush_tlb_all();
-#else
 		mm->context = 0;
-#endif
 		return;
 	}
+#endif
 
 	nbits = ia64_fls(size + 0xfff);
 	while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
@@ -153,7 +156,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
 	start &= ~((1UL << nbits) - 1);
 
 # ifdef CONFIG_SMP
-	platform_global_tlb_purge(start, end, nbits);
+	platform_global_tlb_purge(mm, start, end, nbits);
 # else
 	do {
 		ia64_ptcl(start, (nbits<<2));
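
For reference, the purge-size logic visible in the last hunk (unchanged by
this patch apart from the mm argument now being threaded through) rounds the
flush size up to a power of two that the hardware purge mask advertises, then
aligns the start address down to that size. Here is a self-contained sketch
of that rounding; the purge mask value and the address range are invented:

    #include <stdio.h>

    /* index of the highest set bit, like the kernel's ia64_fls() */
    static int fls64(unsigned long long x)
    {
            int n = -1;
            while (x) {
                    n++;
                    x >>= 1;
            }
            return n;
    }

    int main(void)
    {
            /* which power-of-two purge sizes the CPU supports -- invented */
            unsigned long long purge_mask = 0x0000000000155000ULL;
            int max_bits = 60;
            unsigned long long start = 0x200000000001f234ULL;
            unsigned long long end   = 0x200000000002f000ULL;
            unsigned long long size  = end - start;
            int nbits = fls64(size + 0xfff);    /* round up past page bits */

            /* bump nbits until it names a supported purge size */
            while ((((1ULL << nbits) & purge_mask) == 0) && nbits < max_bits)
                    nbits++;

            start &= ~((1ULL << nbits) - 1);    /* align start to purge size */
            printf("purge %llu bytes at 0x%llx (nbits=%d)\n",
                   1ULL << nbits, start, nbits);
            return 0;
    }

With this assumed mask, a 0xfdcc-byte range rounds up to a single 64 KB purge
(nbits = 16) because bit 16 is the first supported size at or above the
requested one.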