Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c          2
-rw-r--r--   arch/powerpc/mm/hugetlbpage-hash64.c    30
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c           31
-rw-r--r--   arch/powerpc/mm/mem.c                   17
4 files changed, 25 insertions, 55 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index ef1f047f543..fa251f8c2f8 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -775,7 +775,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 	/* page is dirty */
 	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
 		if (trap == 0x400) {
-			__flush_dcache_icache(page_address(page));
+			flush_dcache_icache_page(page);
 			set_bit(PG_arch_1, &page->flags);
 		} else
 			pp |= HPTE_R_N;
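
For orientation, the helper being changed here is the generic lazy icache flush used when faulting a page into the hash table. A sketch of how it plausibly reads after this patch follows; only the lines quoted in the hunk are taken from the source, while the pfn_valid() guard, the pte_page() lookup and the final return are inferred from the huge-page variant deleted in the next file.

	unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
	{
		struct page *page;

		if (!pfn_valid(pte_pfn(pte)))	/* inferred, not quoted from the hunk */
			return pp;

		page = pte_page(pte);		/* inferred, not quoted from the hunk */

		/* page is dirty */
		if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
			if (trap == 0x400) {
				/* instruction fault: flush, then mark clean for the icache */
				flush_dcache_icache_page(page);
				set_bit(PG_arch_1, &page->flags);
			} else
				/* data access: leave it dirty and map it no-execute */
				pp |= HPTE_R_N;
		}
		return pp;			/* inferred, not quoted from the hunk */
	}

By calling flush_dcache_icache_page() instead of __flush_dcache_icache() on a single base page, the helper becomes usable for huge pages as well, since that wrapper (see the mem.c hunk below) hands compound pages to flush_dcache_icache_hugepage().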
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 1508ffc1e1e..199539882f9 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -14,33 +14,6 @@
 #include <asm/cacheflush.h>
 #include <asm/machdep.h>
 
-/*
- * Called by asm hashtable.S for doing lazy icache flush
- */
-static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
-						  pte_t pte, int trap, unsigned long sz)
-{
-	struct page *page;
-	int i;
-
-	if (!pfn_valid(pte_pfn(pte)))
-		return rflags;
-
-	page = pte_page(pte);
-
-	/* page is dirty */
-	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
-		if (trap == 0x400) {
-			for (i = 0; i < (sz / PAGE_SIZE); i++)
-				__flush_dcache_icache(page_address(page+i));
-			set_bit(PG_arch_1, &page->flags);
-		} else {
-			rflags |= HPTE_R_N;
-		}
-	}
-	return rflags;
-}
-
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		     pte_t *ptep, unsigned long trap, int local, int ssize,
 		     unsigned int shift, unsigned int mmu_psize)
@@ -89,8 +62,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 		/* No CPU has hugepages but lacks no execute, so we
 		 * don't need to worry about that case */
-		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
-						       trap, sz);
+		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1bf065546fa..53b200abb02 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -344,27 +344,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	} while (pgd++, addr = next, addr != end);
 }
 
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-		     pte_t *ptep, pte_t pte)
-{
-	if (pte_present(*ptep)) {
-		/* We open-code pte_clear because we need to pass the right
-		 * argument to hpte_need_flush (huge / !huge). Might not be
-		 * necessary anymore if we make hpte_need_flush() get the
-		 * page size from the slices
-		 */
-		pte_update(mm, addr, ptep, ~0UL, 1);
-	}
-	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-}
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep)
-{
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
-	return __pte(old);
-}
-
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
@@ -580,3 +559,13 @@ static int __init hugetlbpage_init(void)
 }
 
 module_init(hugetlbpage_init);
+
+void flush_dcache_icache_hugepage(struct page *page)
+{
+	int i;
+
+	BUG_ON(!PageCompound(page));
+
+	for (i = 0; i < (1UL << compound_order(page)); i++)
+		__flush_dcache_icache(page_address(page+i));
+}
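
The new flush_dcache_icache_hugepage() flushes every base page that makes up the compound page. As a rough worked example of the loop bound, using page sizes that are common on ppc64 but not stated anywhere in this patch:

	/*
	 * Illustrative iteration counts only, assuming a 16MB huge page:
	 *   with 64K base pages: compound_order = log2(16M / 64K) = 8,  so 1UL << 8  = 256 flushes
	 *   with  4K base pages: compound_order = log2(16M /  4K) = 12, so 1UL << 12 = 4096 flushes
	 */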
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 59736317bf0..b9b152558f9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,6 +32,7 @@
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
 #include <linux/lmb.h>
+#include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -417,18 +418,26 @@ EXPORT_SYMBOL(flush_dcache_page);
 
 void flush_dcache_icache_page(struct page *page)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	if (PageCompound(page)) {
+		flush_dcache_icache_hugepage(page);
+		return;
+	}
+#endif
 #ifdef CONFIG_BOOKE
-	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
-	__flush_dcache_icache(start);
-	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+	{
+		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+		__flush_dcache_icache(start);
+		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+	}
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
 	/* On 8xx there is no need to kmap since highmem is not supported */
 	__flush_dcache_icache(page_address(page));
 #else
 	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
-
 }
+
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
 	clear_page(page);
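
Piecing the mem.c hunk back together, flush_dcache_icache_page() ends up reading roughly as below after the patch; this is reconstructed purely from the context and + lines above, with nothing assumed beyond them.

	void flush_dcache_icache_page(struct page *page)
	{
	#ifdef CONFIG_HUGETLB_PAGE
		if (PageCompound(page)) {
			/* huge (compound) pages: flush each constituent base page */
			flush_dcache_icache_hugepage(page);
			return;
		}
	#endif
	#ifdef CONFIG_BOOKE
		{
			void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
			__flush_dcache_icache(start);
			kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
		}
	#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
		/* On 8xx there is no need to kmap since highmem is not supported */
		__flush_dcache_icache(page_address(page));
	#else
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	#endif
	}

The BookE branch gains braces, presumably so the declaration of start still opens a block now that the hugetlb check places statements ahead of it.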