Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/highmem.h       |  2
-rw-r--r--  arch/powerpc/include/asm/pgtable-4k.h    |  2
-rw-r--r--  arch/powerpc/include/asm/pgtable-64k.h   |  2
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h | 58
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 29
-rw-r--r--  arch/powerpc/include/asm/pgtable.h       | 84
6 files changed, 100 insertions(+), 77 deletions(-)
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 04e4a620952..a286e47100b 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -99,7 +99,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
-	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
 	local_flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
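A kmap_atomic mapping is created and torn down on a single CPU with preemption disabled, so no other processor can race on that PTE slot. That is why the highmem code can pass 1 for the new percpu argument and take the simplified store path in __set_pte_at() (added to asm/pgtable.h at the end of this patch) instead of the atomic pte_update() path. A minimal usage sketch, not part of this patch, with a made-up helper name:

/* Illustrative only: a typical kmap_atomic user. The mapping lives entirely
 * on one CPU with preemption disabled, which is what makes the percpu=1
 * fast path in __set_pte_at() safe. */
static void copy_into_highmem_page(struct page *page, const void *src)
{
	void *dst = kmap_atomic(page, KM_USER0);	/* installs the per-CPU PTE */

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(dst, KM_USER0);			/* tears the mapping down */
}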
diff --git a/arch/powerpc/include/asm/pgtable-4k.h b/arch/powerpc/include/asm/pgtable-4k.h
index 6b18ba9d2d8..1dbca4e7de6 100644
--- a/arch/powerpc/include/asm/pgtable-4k.h
+++ b/arch/powerpc/include/asm/pgtable-4k.h
@@ -60,7 +60,7 @@
 /* It should be preserving the high 48 bits and then specifically */
 /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_HPTEFLAGS)
+			 _PAGE_HPTEFLAGS | _PAGE_SPECIAL)
 
 /* Bits to mask out from a PMD to get to the PTE page */
 #define PMD_MASKED_BITS		0
diff --git a/arch/powerpc/include/asm/pgtable-64k.h b/arch/powerpc/include/asm/pgtable-64k.h
index 07b0d8f09cb..7389003349a 100644
--- a/arch/powerpc/include/asm/pgtable-64k.h
+++ b/arch/powerpc/include/asm/pgtable-64k.h
@@ -114,7 +114,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
  * pgprot changes
  */
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED)
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
 
 /* Bits to mask out from a PMD to get to the PTE page */
 #define PMD_MASKED_BITS		0x1ff
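_PAGE_CHG_MASK is the set of PTE bits that must survive a protection change; pte_modify() keeps everything in the mask and takes the rest from the new pgprot. Adding _PAGE_SPECIAL to the mask (here for the 4K and 64K hash layouts, and below for 32-bit) means a pte_mkspecial() annotation is no longer lost across an mprotect()-style permission update. The usual shape of pte_modify(), shown only as a sketch since its definition is not part of this diff:

/* Assumed form of pte_modify() (not shown in this patch): bits in
 * _PAGE_CHG_MASK are preserved, the rest comes from the new protection.
 * With _PAGE_SPECIAL in the mask, the "special" marking is carried over. */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}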
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index f69a4d97772..98bd7c5fcd0 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -429,8 +429,10 @@ extern int icache_44x_need_flush;
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif
 
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
 
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
+			 _PAGE_SPECIAL)
 
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
 			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
@@ -667,44 +669,6 @@ static inline unsigned long long pte_update(pte_t *p,
 #endif /* CONFIG_PTE_64BIT */
 
 /*
- * set_pte stores a linux PTE into the linux page table.
- * On machines which use an MMU hash table we avoid changing the
- * _PAGE_HASHPTE bit.
- */
-
-static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
-				pte_t *ptep, pte_t pte)
-{
-#if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
-	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
-#elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-#if _PAGE_HASHPTE != 0
-	if (pte_val(*ptep) & _PAGE_HASHPTE)
-		flush_hash_entry(mm, ptep, addr);
-#endif
-	__asm__ __volatile__("\
-		stw%U0%X0 %2,%0\n\
-		eieio\n\
-		stw%U0%X0 %L2,%1"
-	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
-	: "r" (pte) : "memory");
-#else
-	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-		      | (pte_val(pte) & ~_PAGE_HASHPTE));
-#endif
-}
-
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
-{
-#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM)
-	WARN_ON(pte_present(*ptep));
-#endif
-	__set_pte_at(mm, addr, ptep, pte);
-}
-
-/*
  * 2.6 calls this without flushing the TLB entry; this is wrong
  * for our hash-based implementation, we fix that up here.
  */
@@ -744,24 +708,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 }
 
 
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	unsigned long bits = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+		 _PAGE_HWEXEC | _PAGE_EXEC);
 	pte_update(ptep, 0, bits);
 }
 
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									   \
-	int __changed = !pte_same(*(__ptep), __entry);			   \
-	if (__changed) {						   \
-		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
-		flush_tlb_page_nohash(__vma, __address);		   \
-	}								   \
-	__changed;							   \
-})
-
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 
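Both the 32-bit header (above) and the 64-bit header (below) lose their private ptep_set_access_flags() macro; the arch-specific part shrinks to __ptep_set_access_flags(), which on 32-bit now also propagates _PAGE_HWEXEC and _PAGE_EXEC, and the common wrapper moves out of line behind the __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS declaration added to asm/pgtable.h at the end of this patch. A hedged reconstruction of that shared wrapper, pieced together from the macro removed here (its real body lives in a .c file outside this diff and may differ):

/* Sketch only: inferred from the removed macro, not copied from the tree. */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed) {
		__ptep_set_access_flags(ptep, entry);
		flush_tlb_page_nohash(vma, address);
	}
	return changed;
}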
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index b0f18be81d9..c627877fcf1 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -125,6 +125,8 @@
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
 
+/* To make some generic powerpc code happy */
+#define _PAGE_HWEXEC		0
 
 /*
  * POWER4 and newer have per page execute protection, older chips can only
@@ -285,6 +287,10 @@ static inline unsigned long pte_update(struct mm_struct *mm,
 	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
 	: "cc" );
 
+	/* huge pages use the old page table lock */
+	if (!huge)
+		assert_pte_locked(mm, addr);
+
 	if (old & _PAGE_HASHPTE)
 		hpte_need_flush(mm, addr, ptep, old, huge);
 	return old;
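pte_update() on 64-bit now asserts that the caller holds the lock protecting the PTE, except for huge pages, which are still serialized by the mm-wide page table lock; the assert_pte_locked() prototype and its !CONFIG_DEBUG_VM stub are added to asm/pgtable.h below. Its out-of-line body is not in this diff; what follows is a speculative sketch of what such a check could look like, where everything beyond the prototype is an assumption:

/* Speculative sketch (not from this patch): walk to the PMD covering addr
 * and insist that the PTE lock for that page table page is held. */
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = pgd_offset(mm, addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	BUG_ON(!spin_is_locked(pte_lockptr(mm, pmd)));
}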
@@ -359,23 +365,11 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 	pte_update(mm, addr, ptep, ~0UL, 0);
 }
 
-/*
- * set_pte stores a linux PTE into the linux page table.
- */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
-{
-	if (pte_present(*ptep))
-		pte_clear(mm, addr, ptep);
-	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-	*ptep = pte;
-}
 
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
  * function doesn't need to flush the hash entry
  */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	unsigned long bits = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -392,15 +386,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
 	:"cc");
 }
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									   \
-	int __changed = !pte_same(*(__ptep), __entry);			   \
-	if (__changed) {						   \
-		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
-		flush_tlb_page_nohash(__vma, __address);		   \
-	}								   \
-	__changed;							   \
-})
 
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 07f55e60169..5c1c4880723 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -6,7 +6,17 @@
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
+
 struct mm_struct;
+
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC64)
@@ -17,6 +27,80 @@ struct mm_struct;
 
 #ifndef __ASSEMBLY__
 
+/* Insert a PTE, top-level function is out of line. It uses an inline
+ * low level function in the respective pgtable-* files
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+		       pte_t pte);
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * an horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+	 * helper pte_update() which does an atomic update. We need to do that
+	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+	 * the hash bits instead (ie, same as the non-SMP case)
+	 */
+	if (percpu)
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+	else
+		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+	 * can just store as long as we do the two halves in the right order
+	 * with a barrier in between. This is possible because we take care,
+	 * in the hash code, to pre-invalidate if the PTE was already hashed,
+	 * which synchronizes us with any concurrent invalidation.
+	 * In the percpu case, we also fallback to the simple update preserving
+	 * the hash bits
+	 */
+	if (percpu) {
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+		return;
+	}
+#if _PAGE_HASHPTE != 0
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+#endif
+	__asm__ __volatile__("\
+		stw%U0%X0 %2,%0\n\
+		eieio\n\
+		stw%U0%X0 %L2,%1"
+	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+	: "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+	/* Third case is 32-bit hash table in UP mode, we need to preserve
+	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
+	 * and see we need to keep track that this PTE needs invalidating
+	 */
+	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+		      | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+	/* Anything else just stores the PTE normally. That covers all 64-bit
+	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 */
+	*ptep = pte;
+#endif
+}
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+				 pte_t *ptep, pte_t entry, int dirty);
+
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
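set_pte_at() itself is now a single out-of-line entry point (the extern above) that funnels every store through the per-MMU __set_pte_at() with percpu=0. Its body is defined in a .c file that is not part of this diff; judging from the two inline versions removed from the ppc32 and ppc64 headers, it presumably filters out the hash-management bits (_PAGE_HPTEFLAGS, now defined for 32-bit as well in the ppc32 hunk above) before handing the PTE to the low-level helper. A hedged reconstruction, so the exact in-tree code may differ:

/* Sketch pieced together from the removed inline versions; not verbatim. */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(*ptep));
#endif
	/* Drop the hash/HPTE tracking bits; they are rebuilt by the hash code */
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	__set_pte_at(mm, addr, ptep, pte, 0);
}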