author		David Gibson <david@gibson.dropbear.id.au>	2009-10-26 15:24:31 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-10-30 02:20:57 -0400
commit		f71dc176aa06359681c30ba6877ffccab6fba3a6
tree		b72d97c2db323ab94399cd2633108c0a00a5da31 /arch/powerpc/mm
parent		8be8cf5b47f72096e42bf88cc3afff7a942a346c
powerpc/mm: Make hpte_need_flush() correctly mask for multiple page sizes
Currently, hpte_need_flush() only correctly flushes the given address for normal pages. Callers for hugepages are required to mask the address themselves. But hpte_need_flush() already looks up the page sizes for its own reasons, so this is a rather silly imposition on the callers. This patch alters it to mask based on the pagesize it has looked up itself, and removes the awkward masking code in the hugepage caller.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
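To see the problem the patch fixes, compare masking a huge-page address with the base-page mask against masking with the page's own size: the base mask leaves the address pointing into the middle of the huge page rather than at its start. A minimal standalone sketch, assuming a 4K base page and a 16M huge page (typical hash-MMU powerpc sizes); none of this is kernel code:

#include <stdio.h>

#define BASE_SHIFT 12                            /* 4K base pages */
#define BASE_PAGE_MASK (~((1UL << BASE_SHIFT) - 1))

int main(void)
{
	unsigned long addr = 0x10abc123UL;       /* lies inside a 16M huge page */
	unsigned int huge_shift = 24;            /* 16M => shift of 24 */

	/* Old behaviour: flush address masked for the base page size only */
	printf("base mask: %#lx\n", addr & BASE_PAGE_MASK);              /* 0x10abc000 */

	/* Patched behaviour: mask with the shift of the actual page size */
	printf("huge mask: %#lx\n", addr & ~((1UL << huge_shift) - 1));  /* 0x10000000 */
	return 0;
}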
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	6
-rw-r--r--	arch/powerpc/mm/tlb_hash64.c	8
2 files changed, 4 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 90df6ffe3a43..3d542a9732ae 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -445,11 +445,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		 * necessary anymore if we make hpte_need_flush() get the
 		 * page size from the slices
 		 */
-		unsigned int psize = get_slice_psize(mm, addr);
-		unsigned int shift = mmu_psize_to_shift(psize);
-		unsigned long sz = ((1UL) << shift);
-		struct hstate *hstate = size_to_hstate(sz);
-		pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
+		pte_update(mm, addr, ptep, ~0UL, 1);
 	}
 	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 }
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 2b2f35f6985e..282d9306361f 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -53,11 +53,6 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 
 	i = batch->index;
 
-	/* We mask the address for the base page size. Huge pages will
-	 * have applied their own masking already
-	 */
-	addr &= PAGE_MASK;
-
 	/* Get page size (maybe move back to caller).
 	 *
 	 * NOTE: when using special 64K mappings in 4K environment like
@@ -75,6 +70,9 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	} else
 		psize = pte_pagesize_index(mm, addr, pte);
 
+	/* Mask the address for the correct page size */
+	addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+
 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
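Taken together, the two hunks move the alignment into the one function that already knows the page size. A rough standalone sketch of the resulting flow, with a mocked-up descriptor table standing in for the kernel's mmu_psize_defs[] (the struct layout, indices, and shift values here are illustrative assumptions, not the kernel's definitions):

#include <stdio.h>

/* Illustrative stand-in for mmu_psize_defs[]: one shift per page size */
struct psize_def {
	unsigned int shift;
};

static const struct psize_def psize_defs[] = {
	{ 12 },	/* index 0: 4K  */
	{ 16 },	/* index 1: 64K */
	{ 24 },	/* index 2: 16M */
};

/* Align addr down to the start of its page, as the patched
 * hpte_need_flush() now does uniformly for every page size. */
static unsigned long mask_for_psize(unsigned long addr, unsigned int psize)
{
	return addr & ~((1UL << psize_defs[psize].shift) - 1);
}

int main(void)
{
	unsigned long addr = 0x10abc123UL;

	for (unsigned int psize = 0; psize < 3; psize++)
		printf("shift %2u: %#lx -> %#lx\n",
		       psize_defs[psize].shift, addr, mask_for_psize(addr, psize));
	return 0;
}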