Diffstat (limited to 'arch/ia64/mm/init.c')
-rw-r--r--  arch/ia64/mm/init.c  38
1 file changed, 36 insertions(+), 2 deletions(-)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657f..faaca21a3718 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/a.out.h>
 #include <asm/dma.h>
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
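Note: the new dma_mark_clean() only marks pages that a DMA transfer has completely overwritten, so lazy_mmu_prot_update() can skip the i-cache flush for them later. A minimal, hedged sketch of how an unmap path might call it for a device-to-memory transfer follows; my_unmap_single() and the identity phys/virt mapping are assumptions for illustration, not code from this patch.

/*
 * Hedged sketch (not from this patch): a hypothetical unmap helper that
 * calls dma_mark_clean() once a device-to-memory transfer has finished,
 * assuming an identity (1:1) DMA mapping so phys_to_virt() is valid.
 */
#include <linux/dma-mapping.h>
#include <linux/io.h>

/* prototype mirroring the definition added above */
extern void dma_mark_clean(void *addr, size_t size);

static void my_unmap_single(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(dma_addr);	/* assumes a 1:1 DMA mapping */

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		/* pages the device fully overwrote are already i-cache coherent */
		dma_mark_clean(vaddr, size);
}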
@@ -595,13 +615,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
 	return 0;
 }
 
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
 int __init
 register_active_ranges(u64 start, u64 end, void *arg)
 {
-	add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+	int nid = paddr_to_nid(__pa(start));
+
+	if (nid < 0)
+		nid = 0;
+#ifdef CONFIG_KEXEC
+	if (start > crashk_res.start && start < crashk_res.end)
+		start = crashk_res.end;
+	if (end > crashk_res.start && end < crashk_res.end)
+		end = crashk_res.start;
+#endif
+
+	if (start < end)
+		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+			__pa(end) >> PAGE_SHIFT);
 	return 0;
 }
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 static int __init
 count_reserved_pages (u64 start, u64 end, void *arg)
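For context, the CONFIG_KEXEC hunk clips any candidate memory range that overlaps the crash-kernel reservation before it is passed to add_active_range(), so the reserved region is never registered as usable memory. Below is a small stand-alone sketch of that clipping logic; the struct, helper name, and addresses are made up for illustration and are not part of the patch.

/*
 * Hedged, self-contained sketch of the CONFIG_KEXEC clipping above.
 * Addresses are invented; struct range stands in for struct resource.
 */
#include <stdio.h>

struct range { unsigned long start, end; };

static struct range crashk_res = { 0x08000000UL, 0x0fffffffUL };

static void clip_crashkernel(unsigned long *start, unsigned long *end)
{
	/* same tests as the patch: move endpoints out of the reservation */
	if (*start > crashk_res.start && *start < crashk_res.end)
		*start = crashk_res.end;
	if (*end > crashk_res.start && *end < crashk_res.end)
		*end = crashk_res.start;
}

int main(void)
{
	unsigned long start = 0x04000000UL, end = 0x0c000000UL;

	clip_crashkernel(&start, &end);
	if (start < end)	/* mirrors the "if (start < end)" guard in the patch */
		printf("register [%#lx, %#lx)\n", start, end);
	return 0;
}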