Diffstat (limited to 'arch/powerpc/include/asm/pgtable.h')
-rw-r--r--	arch/powerpc/include/asm/pgtable.h	57
1 files changed, 57 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
new file mode 100644
index 000000000000..dbb8ca172e44
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_POWERPC_PGTABLE_H
+#define _ASM_POWERPC_PGTABLE_H
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>		/* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+struct mm_struct;
+#endif /* !__ASSEMBLY__ */
+
+#if defined(CONFIG_PPC64)
+# include <asm/pgtable-ppc64.h>
+#else
+# include <asm/pgtable-ppc32.h>
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[];
+
+extern void paging_init(void);
+
+/*
+ * kern_addr_valid is intended to indicate whether an address is a valid
+ * kernel address.  Most 32-bit archs define it as always true (like this)
+ * but most 64-bit archs actually perform a test.  What should we do here?
+ */
+#define kern_addr_valid(addr)	(1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
+		remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PGTABLE_H */
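
A few usage notes on the definitions above. ZERO_PAGE() resolves to the struct page behind empty_zero_page, the single shared page of zeroes mentioned in the comment. As a rough illustration of how callers treat it, the check below compares a page against it; my_is_zero_page is a made-up helper for this sketch, not something defined by the header.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical helper: returns true if @page is the globally shared
 * zero page that backs zero-mapped (never-written) read-only areas. */
static inline int my_is_zero_page(struct page *page, unsigned long vaddr)
{
	return page == ZERO_PAGE(vaddr);
}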
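
io_remap_pfn_range() is defined here as a straight wrapper around remap_pfn_range(). A minimal sketch of a driver mmap handler that relies on it, under the usual driver conventions, is shown below; foo_mmap and FOO_MMIO_BASE are hypothetical names used only for the example.

#include <linux/fs.h>
#include <linux/mm.h>

#define FOO_MMIO_BASE	0xf0000000UL	/* hypothetical device register base */

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = FOO_MMIO_BASE >> PAGE_SHIFT;

	/* Device register space should not be cached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* On powerpc this expands to a plain remap_pfn_range() call. */
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}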
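
The update_mmu_cache() comment describes its place in the fault path: the kernel first writes the new PTE, then calls the hook so the architecture can keep the i-cache and d-cache coherent for the page (and, on hash-MMU machines, preload the HPTE). The fragment below is only an ordering sketch under those assumptions, not the real generic mm code; install_new_pte is an invented name.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Simplified illustration of the ordering: publish the PTE first, then
 * give the architecture a chance to sync caches / preload the HPTE. */
static void install_new_pte(struct vm_area_struct *vma, unsigned long address,
			    pte_t *ptep, pte_t entry)
{
	set_pte_at(vma->vm_mm, address, ptep, entry);
	update_mmu_cache(vma, address, entry);
}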