author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-12-18 14:13:51 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-12-20 22:21:16 -0500
commit	64b3d0e8122b422e879b23d42f9e0e8efbbf9744 (patch)
tree	b1fab3fc39fd3117d0c050b0a54d6fe09f3a2948 /arch/powerpc/include/asm/pgtable-ppc64.h
parent	77520351805cc19ba37394ae33f862ef6d3c2a23 (diff)
powerpc/mm: Rework usage of _PAGE_COHERENT/NO_CACHE/GUARDED
Currently, we never set _PAGE_COHERENT in the PTEs; we just OR it in in the hash code based on some CPU feature bit. We also manipulate _PAGE_NO_CACHE and _PAGE_GUARDED by hand in all sorts of places.

This changes the logic so that instead, the PTE now contains _PAGE_COHERENT for all normal RAM pages that have I = 0, on platforms that need it. The hash code clears it if the feature bit is not set.

It also adds some clean accessors to set up the various valid combinations of access flags, and changes various bits of code to use them instead. This should help ensure the PTE actually contains the bit combinations that we really want.

I also removed _PAGE_GUARDED from _PAGE_BASE on 44x and instead set it explicitly from the TLB miss. I will ultimately remove it completely, as it appears that it might not be needed after all, but in the meantime having it in the TLB miss makes things a lot easier.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
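For illustration, here is a hedged sketch of the kind of "clean accessors" the message describes, in the style of the pgprot_noncached() macro removed below. The helper names and the _PAGE_CACHE_CTL mask are assumptions about the rest of the patch (not shown in this file-limited diff), not a quote of it:

/* Assumed mask of all cache-attribute bits a powerpc PTE can carry. */
#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | \
			 _PAGE_NO_CACHE | _PAGE_WRITETHRU)

/* Uncacheable and guarded: the safe choice for MMIO (I=1, G=1). */
#define pgprot_noncached(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					  _PAGE_NO_CACHE | _PAGE_GUARDED))

/* Uncacheable but unguarded: permits write combining where supported. */
#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

/* Cacheable, with coherency (M=1) for normal RAM on platforms that need it. */
#define pgprot_cached(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					  _PAGE_COHERENT))

Clearing _PAGE_CACHE_CTL first means each accessor yields exactly one valid WIMG combination, rather than OR-ing new attribute bits over whatever was already there.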
Diffstat (limited to 'arch/powerpc/include/asm/pgtable-ppc64.h')
-rw-r--r--	arch/powerpc/include/asm/pgtable-ppc64.h	| 13 -------------
1 file changed, 0 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 1f0a330f03f4..b0f18be81d9f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -245,9 +245,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
 static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
 
-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
 static inline pte_t pte_wrprotect(pte_t pte) {
 	pte_val(pte) &= ~(_PAGE_RW); return pte; }
 static inline pte_t pte_mkclean(pte_t pte) {
@@ -405,16 +402,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 	__changed;							\
 })
 
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-				     unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
 
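As a usage note: the pgprot_noncached() removed above does not go away, since the commit message says the replacement accessors are added elsewhere in the patch. A typical consumer is a driver mmap handler; the sketch below is hypothetical (foo_mmap is an invented name), while remap_pfn_range() and struct vm_area_struct are the standard kernel APIs of this era:

/* Hypothetical driver mmap handler mapping device registers to userspace. */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* MMIO must be mapped uncached and guarded on powerpc. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}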