author     David Gibson <david@gibson.dropbear.id.au>  2009-10-26 15:24:31 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-10-30 02:21:23 -0400
commit     0895ecda79428df48501e48dd0a868e0c8e1aae2 (patch)
tree       8ecfc6e0bf9c47c793df4c1a1bf1f503c9351281 /arch/powerpc/mm
parent     883a3e523672ebba2ec3969837ba02af4f70fae2 (diff)
powerpc/mm: Bring hugepage PTE accessor functions back into sync with normal accessors
The hugepage arch code provides a number of hook functions/macros which mirror the functionality of various normal page pte access functions. Various changes in the normal page accessors (in particular BenH's recent changes to the handling of lazy icache flushing and PAGE_EXEC) have caused the hugepage versions to get out of sync with the originals. In some cases, this is a bug, at least on some MMU types.

One of the reasons that some hooks were not identical to the normal page versions is that the fact that we're dealing with a hugepage needed to be passed down to select the correct dcache-icache flush function. This patch makes the main flush_dcache_icache_page() function hugepage aware (by checking for the PageCompound flag). That in turn means we can make set_huge_pte_at() just a call to set_pte_at(), bringing it back into sync. As a bonus, this lets us remove the hash_huge_page_do_lazy_icache() function, replacing it with a call to the hash_page_do_lazy_icache() function it was based on.

Some other hugepage pte access hooks - huge_ptep_get_and_clear() and huge_ptep_clear_flush() - are not so easily unified, but this patch at least brings them back into sync with the current versions of the corresponding normal page functions.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
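As a concrete illustration of the unification described above: once flush_dcache_icache_page() handles hugepages itself, set_huge_pte_at() reduces to a plain call to set_pte_at(). The sketch below shows that shape only; the actual replacement definition is not part of the arch/powerpc/mm diff on this page, so the static-inline wrapper form and its placement are assumptions made for illustration.

/* Sketch only -- not part of the diff below. With the flush made
 * hugepage-aware, the hugepage setter no longer needs its own flush
 * handling and can defer to the normal page accessor (assumed
 * static inline wrapper form).
 */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);  /* same code path as normal pages */
}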
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c        2
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c  30
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c         31
-rw-r--r--  arch/powerpc/mm/mem.c                 17
4 files changed, 25 insertions, 55 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index ef1f047f5431..fa251f8c2f82 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -775,7 +775,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 	/* page is dirty */
 	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
 		if (trap == 0x400) {
-			__flush_dcache_icache(page_address(page));
+			flush_dcache_icache_page(page);
 			set_bit(PG_arch_1, &page->flags);
 		} else
 			pp |= HPTE_R_N;
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 1508ffc1e1e1..199539882f92 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -14,33 +14,6 @@
 #include <asm/cacheflush.h>
 #include <asm/machdep.h>
 
-/*
- * Called by asm hashtable.S for doing lazy icache flush
- */
-static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
-						  pte_t pte, int trap, unsigned long sz)
-{
-	struct page *page;
-	int i;
-
-	if (!pfn_valid(pte_pfn(pte)))
-		return rflags;
-
-	page = pte_page(pte);
-
-	/* page is dirty */
-	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
-		if (trap == 0x400) {
-			for (i = 0; i < (sz / PAGE_SIZE); i++)
-				__flush_dcache_icache(page_address(page+i));
-			set_bit(PG_arch_1, &page->flags);
-		} else {
-			rflags |= HPTE_R_N;
-		}
-	}
-	return rflags;
-}
-
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		     pte_t *ptep, unsigned long trap, int local, int ssize,
 		     unsigned int shift, unsigned int mmu_psize)
@@ -89,8 +62,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 		/* No CPU has hugepages but lacks no execute, so we
 		 * don't need to worry about that case */
-		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
-						       trap, sz);
+		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1bf065546fa1..53b200abb025 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -344,27 +344,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	} while (pgd++, addr = next, addr != end);
 }
 
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-		     pte_t *ptep, pte_t pte)
-{
-	if (pte_present(*ptep)) {
-		/* We open-code pte_clear because we need to pass the right
-		 * argument to hpte_need_flush (huge / !huge). Might not be
-		 * necessary anymore if we make hpte_need_flush() get the
-		 * page size from the slices
-		 */
-		pte_update(mm, addr, ptep, ~0UL, 1);
-	}
-	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-}
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep)
-{
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
-	return __pte(old);
-}
-
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
@@ -580,3 +559,13 @@ static int __init hugetlbpage_init(void)
 }
 
 module_init(hugetlbpage_init);
+
+void flush_dcache_icache_hugepage(struct page *page)
+{
+	int i;
+
+	BUG_ON(!PageCompound(page));
+
+	for (i = 0; i < (1UL << compound_order(page)); i++)
+		__flush_dcache_icache(page_address(page+i));
+}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 59736317bf0e..b9b152558f9c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,6 +32,7 @@
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
 #include <linux/lmb.h>
+#include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -417,18 +418,26 @@ EXPORT_SYMBOL(flush_dcache_page);
 
 void flush_dcache_icache_page(struct page *page)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	if (PageCompound(page)) {
+		flush_dcache_icache_hugepage(page);
+		return;
+	}
+#endif
 #ifdef CONFIG_BOOKE
-	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
-	__flush_dcache_icache(start);
-	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+	{
+		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+		__flush_dcache_icache(start);
+		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+	}
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
 	/* On 8xx there is no need to kmap since highmem is not supported */
 	__flush_dcache_icache(page_address(page));
 #else
 	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
-
 }
+
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
 	clear_page(page);