author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2013-06-20 05:00:18 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-06-21 02:01:54 -0400
commit		12bc9f6fc1d6582b4529ac522d2231bd2584a5f1 (patch)
tree		40be8749c9b19e0e9dfddeee54436271dce2bb2f
parent		ac52ae4721233150a3c30e9732a1c1f4f68e7db7 (diff)
powerpc: Replace find_linux_pte with find_linux_pte_or_hugepte
Replace find_linux_pte with find_linux_pte_or_hugepte and explicitly
document why we don't need to handle transparent hugepages at callsites.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
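
The conversion applies one pattern at every callsite, visible in the hunks below: callers pass a hugepage_shift out-parameter (or NULL if they do not care about the size), and callers that can never map hugepages assert that the returned shift is zero. A minimal sketch of that pattern, modelled on the eeh.c hunk below; example_token_to_phys is an illustrative name, not part of the patch:

	/*
	 * Illustrative sketch only (not part of the patch): the callsite
	 * pattern this series introduces, modelled on the eeh.c hunk.
	 * Callers that can never map hugepages pass a shift out-parameter
	 * and assert that it comes back zero.
	 */
	static unsigned long example_token_to_phys(unsigned long token)
	{
		pte_t *ptep;
		unsigned long pa;
		int hugepage_shift;

		/* ioremap space is backed by ordinary pages, no hugepages */
		ptep = find_linux_pte_or_hugepte(init_mm.pgd, token,
						 &hugepage_shift);
		if (!ptep)
			return token;
		WARN_ON(hugepage_shift);	/* a non-zero shift would be a bug */
		pa = pte_pfn(*ptep) << PAGE_SHIFT;
		return pa | (token & (PAGE_SIZE - 1));
	}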
 arch/powerpc/include/asm/pgtable-ppc64.h | 24
 arch/powerpc/kernel/eeh.c                |  7
 arch/powerpc/kernel/io-workarounds.c     | 11
 arch/powerpc/kvm/book3s_hv_rm_mmu.c      |  2
 arch/powerpc/mm/hash_utils_64.c          |  8
 arch/powerpc/mm/hugetlbpage.c            |  8
 arch/powerpc/mm/tlb_hash64.c             |  9
 7 files changed, 36 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 6c9323f3ab54..e71bd25d62d7 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -344,30 +344,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
-
-/*
- * find_linux_pte returns the address of a linux pte for a given
- * effective address and directory. If not found, it returns zero.
- */
-static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
-{
-	pgd_t *pg;
-	pud_t *pu;
-	pmd_t *pm;
-	pte_t *pt = NULL;
-
-	pg = pgdir + pgd_index(ea);
-	if (!pgd_none(*pg)) {
-		pu = pud_offset(pg, ea);
-		if (!pud_none(*pu)) {
-			pm = pmd_offset(pu, ea);
-			if (pmd_present(*pm))
-				pt = pte_offset_kernel(pm, ea);
-		}
-	}
-	return pt;
-}
-
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 7c567be3dd03..af2b9ae07df5 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -260,10 +260,15 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
 {
 	pte_t *ptep;
 	unsigned long pa;
+	int hugepage_shift;
 
-	ptep = find_linux_pte(init_mm.pgd, token);
+	/*
+	 * We won't find hugepages here, iomem
+	 */
+	ptep = find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
 	if (!ptep)
 		return token;
+	WARN_ON(hugepage_shift);
 	pa = pte_pfn(*ptep) << PAGE_SHIFT;
 
 	return pa | (token & (PAGE_SIZE-1));
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 50e90b7e7139..fa0b54b2a362 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -55,6 +55,7 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
 
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
+	unsigned hugepage_shift;
 	struct iowa_bus *bus;
 	int token;
 
@@ -70,11 +71,17 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 		if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
 			return NULL;
 
-		ptep = find_linux_pte(init_mm.pgd, vaddr);
+		ptep = find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
+						 &hugepage_shift);
 		if (ptep == NULL)
 			paddr = 0;
-		else
+		else {
+			/*
+			 * we don't have hugepages backing iomem
+			 */
+			WARN_ON(hugepage_shift);
 			paddr = pte_pfn(*ptep) << PAGE_SHIFT;
+		}
 		bus = iowa_pci_find(vaddr, paddr);
 
 		if (bus == NULL)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6dcbb49105a4..dcf892d25a56 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -27,7 +27,7 @@ static void *real_vmalloc_addr(void *x)
 	unsigned long addr = (unsigned long) x;
 	pte_t *p;
 
-	p = find_linux_pte(swapper_pg_dir, addr);
+	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
 	if (!p || !pte_present(*p))
 		return NULL;
 	/* assume we don't have huge pages in vmalloc space... */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2f470809876f..e8434ca6efd4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1145,6 +1145,7 @@ EXPORT_SYMBOL_GPL(hash_page);
 void hash_preload(struct mm_struct *mm, unsigned long ea,
 		  unsigned long access, unsigned long trap)
 {
+	int hugepage_shift;
 	unsigned long vsid;
 	pgd_t *pgdir;
 	pte_t *ptep;
@@ -1166,10 +1167,15 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	pgdir = mm->pgd;
 	if (pgdir == NULL)
 		return;
-	ptep = find_linux_pte(pgdir, ea);
+	/*
+	 * THP pages use update_mmu_cache_pmd. We don't do
+	 * hash preload there. Hence can ignore THP here
+	 */
+	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
 	if (!ptep)
 		return;
 
+	WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
 	 * a 64K kernel), then we don't preload, hash_page() will take
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 49282045ee96..8add58061003 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -105,6 +105,7 @@ int pgd_huge(pgd_t pgd)
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
+	/* Only called for hugetlbfs pages, hence can ignore THP */
 	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 }
 
@@ -673,11 +674,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 	struct page *page;
 	unsigned shift;
 	unsigned long mask;
-
+	/*
+	 * Transparent hugepages are handled by generic code. We can skip them
+	 * here.
+	 */
 	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
 
 	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift)
+	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
 		return ERR_PTR(-EINVAL);
 
 	mask = (1UL << shift) - 1;
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 48bf63ea6525..313c85c5aa90 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -189,6 +189,7 @@ void tlb_flush(struct mmu_gather *tlb)
 void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 			      unsigned long end)
 {
+	int hugepage_shift;
 	unsigned long flags;
 
 	start = _ALIGN_DOWN(start, PAGE_SIZE);
@@ -206,7 +207,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 	local_irq_save(flags);
 	arch_enter_lazy_mmu_mode();
 	for (; start < end; start += PAGE_SIZE) {
-		pte_t *ptep = find_linux_pte(mm->pgd, start);
+		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start,
+							&hugepage_shift);
 		unsigned long pte;
 
 		if (ptep == NULL)
@@ -214,7 +216,10 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 		pte = pte_val(*ptep);
 		if (!(pte & _PAGE_HASHPTE))
 			continue;
-		hpte_need_flush(mm, start, ptep, pte, 0);
+		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+			hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
+		else
+			hpte_need_flush(mm, start, ptep, pte, 0);
 	}
 	arch_leave_lazy_mmu_mode();
 	local_irq_restore(flags);
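
For callers that do expect hugepages, the shift returned through the third argument gives the hugepage size. A sketch of that use, following the follow_huge_addr() hunk above; example_huge_page is an illustrative name, not part of the patch:

	/*
	 * Illustrative sketch only: consuming the returned shift when a
	 * hugepage is expected, as in the follow_huge_addr() hunk.
	 */
	static struct page *example_huge_page(struct mm_struct *mm,
					      unsigned long address)
	{
		unsigned shift;
		unsigned long mask;
		pte_t *ptep;

		ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
		/*
		 * Bail unless this really is a hugepage; transparent
		 * hugepages are handled by generic code and skipped here.
		 */
		if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
			return ERR_PTR(-EINVAL);

		/* shift encodes the hugepage size; mask selects the offset */
		mask = (1UL << shift) - 1;
		return pte_page(*ptep) + ((address & mask) >> PAGE_SHIFT);
	}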