Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h     | 25
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h  |  1
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c        |  2
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c   | 30
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c          | 31
-rw-r--r--  arch/powerpc/mm/mem.c                  | 17
6 files changed, 45 insertions(+), 61 deletions(-)
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 038886834da5..5856a66ab404 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -6,6 +6,8 @@
 pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
 				 unsigned long addr, unsigned *shift);
 
+void flush_dcache_icache_hugepage(struct page *page);
+
 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 			   unsigned long len);
 
@@ -13,12 +15,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
 			    unsigned long ceiling);
 
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-		     pte_t *ptep, pte_t pte);
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep);
-
 /*
  * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
  * to override the version in mm/hugetlb.c
@@ -44,9 +40,26 @@ static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
 }
 
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep)
+{
+	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
+	return __pte(old);
+}
+
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 					 unsigned long addr, pte_t *ptep)
 {
+	pte_t pte;
+	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+	flush_tlb_page(vma, addr);
 }
 
 static inline int huge_pte_none(pte_t pte)
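A minimal sketch of how these inlines compose (example_replace_huge_pte is a hypothetical caller, not part of this patch; it assumes the definitions above are in scope):

static void example_replace_huge_pte(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep,
				     pte_t pte)
{
	/* Clear the old translation and shoot down its TLB entry;
	 * internally this is huge_ptep_get_and_clear() plus a flush */
	huge_ptep_clear_flush(vma, addr, ptep);

	/* Install the replacement; this is now just set_pte_at(),
	 * since the generic accessor handles huge PTEs here */
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}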
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index dd50ea15e648..7514ec2f8540 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -245,6 +245,7 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
 			     unsigned long vsid, pte_t *ptep, unsigned long trap,
 			     unsigned int local, int ssize);
 struct mm_struct;
+unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		     pte_t *ptep, unsigned long trap, int local, int ssize,
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index ef1f047f5431..fa251f8c2f82 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -775,7 +775,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 	/* page is dirty */
 	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
 		if (trap == 0x400) {
-			__flush_dcache_icache(page_address(page));
+			flush_dcache_icache_page(page);
 			set_bit(PG_arch_1, &page->flags);
 		} else
 			pp |= HPTE_R_N;
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 1508ffc1e1e1..199539882f92 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -14,33 +14,6 @@
 #include <asm/cacheflush.h>
 #include <asm/machdep.h>
 
-/*
- * Called by asm hashtable.S for doing lazy icache flush
- */
-static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
-						  pte_t pte, int trap, unsigned long sz)
-{
-	struct page *page;
-	int i;
-
-	if (!pfn_valid(pte_pfn(pte)))
-		return rflags;
-
-	page = pte_page(pte);
-
-	/* page is dirty */
-	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
-		if (trap == 0x400) {
-			for (i = 0; i < (sz / PAGE_SIZE); i++)
-				__flush_dcache_icache(page_address(page+i));
-			set_bit(PG_arch_1, &page->flags);
-		} else {
-			rflags |= HPTE_R_N;
-		}
-	}
-	return rflags;
-}
-
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		     pte_t *ptep, unsigned long trap, int local, int ssize,
 		     unsigned int shift, unsigned int mmu_psize)
@@ -89,8 +62,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 		/* No CPU has hugepages but lacks no execute, so we
 		 * don't need to worry about that case */
-		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
-						       trap, sz);
+		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
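With the hugepage-private copy removed, huge and normal page faults share one lazy icache flush. A minimal sketch of the pattern both call sites follow (example_lazy_flush is a hypothetical wrapper, not part of this patch):

static unsigned int example_lazy_flush(unsigned int rflags, pte_t pte,
				       int trap)
{
	/* Coherent-icache CPUs never need the lazy flush */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return rflags;

	/* Flushes on an instruction fault (0x400), otherwise may
	 * set HPTE_R_N (no-execute) in the returned flags */
	return hash_page_do_lazy_icache(rflags, pte, trap);
}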
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1bf065546fa1..53b200abb025 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -344,27 +344,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	} while (pgd++, addr = next, addr != end);
 }
 
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-		     pte_t *ptep, pte_t pte)
-{
-	if (pte_present(*ptep)) {
-		/* We open-code pte_clear because we need to pass the right
-		 * argument to hpte_need_flush (huge / !huge). Might not be
-		 * necessary anymore if we make hpte_need_flush() get the
-		 * page size from the slices
-		 */
-		pte_update(mm, addr, ptep, ~0UL, 1);
-	}
-	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-}
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep)
-{
-	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
-	return __pte(old);
-}
-
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
@@ -580,3 +559,13 @@ static int __init hugetlbpage_init(void)
 }
 
 module_init(hugetlbpage_init);
+
+void flush_dcache_icache_hugepage(struct page *page)
+{
+	int i;
+
+	BUG_ON(!PageCompound(page));
+
+	for (i = 0; i < (1UL << compound_order(page)); i++)
+		__flush_dcache_icache(page_address(page+i));
+}
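The loop bound above is the number of base pages covered by the compound page. A hypothetical helper making that arithmetic explicit (illustrative only, not part of this patch):

static inline unsigned long example_hugepage_subpages(struct page *page)
{
	/* e.g. a 16M huge page with a 4K base PAGE_SIZE has
	 * compound_order() == 12, so 1UL << 12 == 4096 sub-pages
	 * get flushed by flush_dcache_icache_hugepage() */
	return 1UL << compound_order(page);
}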
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 59736317bf0e..b9b152558f9c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,6 +32,7 @@
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
 #include <linux/lmb.h>
+#include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -417,18 +418,26 @@ EXPORT_SYMBOL(flush_dcache_page);
 
 void flush_dcache_icache_page(struct page *page)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	if (PageCompound(page)) {
+		flush_dcache_icache_hugepage(page);
+		return;
+	}
+#endif
 #ifdef CONFIG_BOOKE
-	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
-	__flush_dcache_icache(start);
-	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+	{
+		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+		__flush_dcache_icache(start);
+		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+	}
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
 	/* On 8xx there is no need to kmap since highmem is not supported */
 	__flush_dcache_icache(page_address(page));
 #else
 	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
-
 }
+
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
 	clear_page(page);
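flush_dcache_icache_page() now handles huge pages transparently, so callers that follow the PG_arch_1 lazy-flush convention need no hugepage special case. A minimal sketch of such a caller (example_sync_icache is hypothetical, not part of this patch):

static void example_sync_icache(struct page *page)
{
	/* PG_arch_1 set means the icache is already clean here */
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);	/* huge or normal */
		set_bit(PG_arch_1, &page->flags);
	}
}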