 include/asm-powerpc/pgtable.h  | 13 +++++++++++++
 include/asm-powerpc/tlbflush.h | 11 -----------
 2 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index d18ffe7bc7c4..dbb8ca172e44 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -38,6 +38,19 @@ extern void paging_init(void);
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #include <asm-generic/pgtable.h>
+
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 5c9108147644..361cd5c7a32b 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -162,16 +162,5 @@ extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 
 #endif
 
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
 #endif /*__KERNEL__ */
 #endif /* _ASM_POWERPC_TLBFLUSH_H */
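
The comment being moved documents the update_mmu_cache() hook that generic memory-management code calls once a new PTE has been installed while handling a page fault. As a minimal, hypothetical sketch of that calling pattern (the helper name example_install_pte and the simplified control flow are assumptions for illustration, not code from this patch), a fault path writes the PTE with set_pte_at() and then lets the architecture hook flush caches or preload the hash table:

/*
 * Hypothetical sketch only: how a fault-handling path typically drives
 * update_mmu_cache().  The helper name and simplified control flow are
 * illustrative and not taken from the files this patch touches.
 */
static int example_install_pte(struct vm_area_struct *vma,
			       unsigned long address,
			       pte_t *ptep, pte_t entry)
{
	/* Publish the new translation in the process page table. */
	set_pte_at(vma->vm_mm, address, ptep, entry);

	/*
	 * Give the architecture a chance to react: on powerpc this can
	 * keep the i-cache and d-cache coherent for the newly mapped
	 * page and, on hash-MMU machines, pre-insert the matching HPTE
	 * so the first access does not take an extra hash-table miss.
	 */
	update_mmu_cache(vma, address, entry);
	return 0;
}

Moving the prototype into pgtable.h means code that already includes the page-table headers sees the declaration without also having to pull in tlbflush.h.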
