Diffstat (limited to 'arch/ia64/mm/init.c')
-rw-r--r--   arch/ia64/mm/init.c | 46
1 file changed, 39 insertions, 7 deletions
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657f..f225dd72968b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/a.out.h>
 #include <asm/dma.h>
@@ -67,7 +68,7 @@ max_pgt_pages(void)
 #ifndef CONFIG_NUMA
 	node_free_pages = nr_free_pages();
 #else
-	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
+	node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
 #endif
 	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
 	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
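
The hunk above swaps the removed nr_free_pages_pgdat() helper for node_page_state(numa_node_id(), NR_FREE_PAGES), the generic per-node vmstat counter; the surrounding context then sizes the per-node page-table cache from that free-page count. A minimal userspace sketch of that sizing rule follows; the two constant values are illustrative assumptions, not necessarily the ones this file defines:

/*
 * Model of max_pgt_pages(): cap the page-table cache at a fraction of the
 * node's free pages, with a fixed floor.  Constants are assumed values.
 */
#define PGT_FRACTION_OF_NODE_MEM	10	/* assumption */
#define MIN_PGT_PAGES			25UL	/* assumption */

static unsigned long max_pgt_pages_model(unsigned long node_free_pages)
{
	unsigned long max_pgt = node_free_pages / PGT_FRACTION_OF_NODE_MEM;

	return max_pgt > MIN_PGT_PAGES ? max_pgt : MIN_PGT_PAGES;
}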
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
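
Note how dma_mark_clean() rounds the start address up with PAGE_ALIGN() and stops while a whole page still fits below end: only pages lying entirely inside [addr, addr + size) are marked clean, since a partially DMA-written page may still hold stale instruction bytes. A small userspace model of that page walk; PAGE_SIZE and the example addresses are assumptions for illustration:

#include <stdio.h>

#define PAGE_SIZE	16384UL		/* assumed ia64 page size */
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static void mark_clean_model(unsigned long addr, unsigned long size)
{
	unsigned long pg_addr = PAGE_ALIGN(addr);	/* first full page */
	unsigned long end = addr + size;

	while (pg_addr + PAGE_SIZE <= end) {		/* complete pages only */
		printf("mark page at 0x%lx clean\n", pg_addr);
		pg_addr += PAGE_SIZE;
	}
}

int main(void)
{
	/* buffer straddles three pages; only the middle one is complete */
	mark_clean_model(PAGE_SIZE + 100, 2 * PAGE_SIZE);
	return 0;
}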
@@ -156,9 +176,8 @@ ia64_init_addr_space (void)
 	 * the problem. When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -175,9 +194,8 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
-			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
 			vma->vm_end = PAGE_SIZE;
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
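
Both ia64_init_addr_space() hunks make the same simplification: kmem_cache_alloc() followed by memset(vma, 0, sizeof(*vma)) becomes a single kmem_cache_zalloc(), which hands back an already-zeroed object. A rough equivalence sketch, modeled with plain malloc() purely for illustration:

#include <stdlib.h>
#include <string.h>

/* What zalloc folds together: allocate, then zero the whole object. */
static void *cache_zalloc_model(size_t object_size)
{
	void *obj = malloc(object_size);	/* stands in for kmem_cache_alloc() */

	if (obj)
		memset(obj, 0, object_size);	/* the zeroing zalloc performs */
	return obj;
}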
@@ -595,13 +613,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
 	return 0;
 }
 
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
 int __init
 register_active_ranges(u64 start, u64 end, void *arg)
 {
-	add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+	int nid = paddr_to_nid(__pa(start));
+
+	if (nid < 0)
+		nid = 0;
+#ifdef CONFIG_KEXEC
+	if (start > crashk_res.start && start < crashk_res.end)
+		start = crashk_res.end;
+	if (end > crashk_res.start && end < crashk_res.end)
+		end = crashk_res.start;
+#endif
+
+	if (start < end)
+		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+			__pa(end) >> PAGE_SHIFT);
 	return 0;
 }
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 static int __init
 count_reserved_pages (u64 start, u64 end, void *arg)
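
The last hunk does three things: it moves register_active_ranges() out of the CONFIG_VIRTUAL_MEM_MAP-only region (the #endif now sits before the function), it resolves the node id with paddr_to_nid() instead of hard-coding node 0 (falling back to 0 when the lookup fails), and, under CONFIG_KEXEC, it clips each range against the crash-kernel reservation so the crash kernel's memory is never registered as an active range; this is what the new <linux/kexec.h> include at the top is for. A sketch of the clipping rule; struct res here is a simplified stand-in for the kernel's struct resource:

/* Simplified stand-in for struct resource, for illustration only. */
struct res { unsigned long start, end; };

/*
 * Clip [*start, *end) against the reservation, mirroring the two range
 * checks in the hunk above.  Returns nonzero if anything survives; empty
 * results must not be registered, hence the "if (start < end)" guard.
 */
static int clip_crashk_model(unsigned long *start, unsigned long *end,
			     const struct res *crashk)
{
	if (*start > crashk->start && *start < crashk->end)
		*start = crashk->end;	/* range begins inside reservation */
	if (*end > crashk->start && *end < crashk->end)
		*end = crashk->start;	/* range ends inside reservation */
	return *start < *end;
}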