author     Benjamin Herrenschmidt <benh@kernel.crashing.org>    2008-12-18 14:13:51 -0500
committer  Paul Mackerras <paulus@samba.org>                    2008-12-20 22:21:16 -0500
commit     64b3d0e8122b422e879b23d42f9e0e8efbbf9744
tree       b1fab3fc39fd3117d0c050b0a54d6fe09f3a2948
parent     77520351805cc19ba37394ae33f862ef6d3c2a23
powerpc/mm: Rework usage of _PAGE_COHERENT/NO_CACHE/GUARDED
Currently, we never set _PAGE_COHERENT in the PTEs; we just OR it in,
in the hash code, based on some CPU feature bit. We also manipulate
_PAGE_NO_CACHE and _PAGE_GUARDED by hand in all sorts of places.

This changes the logic so that, instead, the PTE now contains
_PAGE_COHERENT for all normal RAM pages that have I = 0, on platforms
that need it. The hash code clears it if the feature bit is not set.

It also adds some clean accessors to set up the various valid
combinations of access flags, and changes various bits of code to use
them instead. This should help ensure that the PTE actually contains
the bit combinations that we really want.
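For illustration, here is a minimal sketch (not part of the patch) of how a
driver mmap path would pick its cache attributes with the new accessors.
foo_mmap_prot() and its "mmio" flag are made-up names; pgprot_noncached()
and pgprot_cached() are the helpers this patch adds to asm/pgtable.h, and
remap_pfn_range() is the usual kernel mapping call:

        #include <linux/mm.h>           /* vm_area_struct, remap_pfn_range() */
        #include <asm/pgtable.h>        /* new pgprot_* cache-attribute accessors */

        /* Hypothetical helper, shown for illustration only. */
        static int foo_mmap_prot(struct vm_area_struct *vma, int mmio)
        {
                if (mmio)
                        /* I/O registers: uncached and guarded */
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                else
                        /* normal RAM: cacheable; M is now carried in the PTE */
                        vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);

                return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot);
        }

Unlike the old open-coded "pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED"
pattern, these accessors first mask out any existing cache-control bits
(_PAGE_CACHE_CTL) before setting the requested ones.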
I also removed _PAGE_GUARDED from _PAGE_BASE on 44x and instead set it
explicitly in the TLB miss handler. I will ultimately remove it
completely, as it appears that it might not be needed after all, but in
the meantime having it in the TLB miss handler makes things a lot
easier.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h | 42
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 13
-rw-r--r--  arch/powerpc/include/asm/pgtable.h       | 26
-rw-r--r--  arch/powerpc/kernel/head_44x.S           |  1
-rw-r--r--  arch/powerpc/kernel/pci-common.c         | 24
-rw-r--r--  arch/powerpc/mm/hash_low_32.S            |  4
-rw-r--r--  arch/powerpc/mm/mem.c                    |  4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 27
-rw-r--r--  drivers/video/controlfb.c                |  4
9 files changed, 68 insertions, 77 deletions
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 6ab7c67cb5ab..f69a4d977729 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -228,9 +228,10 @@ extern int icache_44x_need_flush;
  * - FILE *must* be in the bottom three bits because swap cache
  *   entries use the top 29 bits for TLB2.
  *
- * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it
- *   doesn't support SMP. So we can use this as software bit, like
- *   DIRTY.
+ * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
+ *   because it doesn't support SMP. However, some later 460 variants
+ *   have -some- form of SMP support and so I keep the bit there for
+ *   future use
  *
  * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
  * for memory protection related functions (see PTE structure in
@@ -436,20 +437,23 @@ extern int icache_44x_need_flush;
          _PAGE_USER | _PAGE_ACCESSED | \
          _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
          _PAGE_EXEC | _PAGE_HWEXEC)
+
 /*
- * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
- * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
- * to have it in the Linux PTE, and in fact the bit could be reused for
- * another purpose.  -- paulus.
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
  */
-
-#ifdef CONFIG_44x
-#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
 #else
 #define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED)
 #endif
+#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)
+
 #define _PAGE_WRENABLE  (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
 #define _PAGE_KERNEL    (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
+#define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)
 
 #ifdef CONFIG_PPC_STD_MMU
 /* On standard PPC MMU, no user access implies kernel read/write access,
@@ -459,7 +463,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
 #endif
 
-#define _PAGE_IO        (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define _PAGE_IO        (_PAGE_KERNEL_NC | _PAGE_GUARDED)
 #define _PAGE_RAM       (_PAGE_KERNEL | _PAGE_HWEXEC)
 
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
@@ -552,9 +556,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
 static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
 
-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
 static inline pte_t pte_wrprotect(pte_t pte) {
         pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
 static inline pte_t pte_mkclean(pte_t pte) {
@@ -693,10 +694,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 #endif
 }
 
+
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep, pte_t pte)
 {
-#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM)
         WARN_ON(pte_present(*ptep));
 #endif
         __set_pte_at(mm, addr, ptep, pte);
@@ -760,16 +762,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
         __changed; \
 })
 
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-                                     unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 1f0a330f03f4..b0f18be81d9f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -245,9 +245,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
 static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
 
-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
 static inline pte_t pte_wrprotect(pte_t pte) {
         pte_val(pte) &= ~(_PAGE_RW); return pte; }
 static inline pte_t pte_mkclean(pte_t pte) {
@@ -405,16 +402,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
         __changed; \
 })
 
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-                                     unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
 
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dbb8ca172e44..07f55e601696 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -16,6 +16,32 @@ struct mm_struct;
 #endif
 
 #ifndef __ASSEMBLY__
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+                         _PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot)    (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                            _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                            _PAGE_NO_CACHE))
+
+#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                            _PAGE_COHERENT))
+
+#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                            _PAGE_COHERENT | _PAGE_WRITETHRU))
+
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                     unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index f3a1ea9d7fe4..26237357a88c 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -570,6 +570,7 @@ finish_tlb_load:
         rlwimi  r10,r12,29,30,30        /* DIRTY -> SW position */
         and     r11,r12,r10             /* Mask PTE bits to keep */
         andi.   r10,r12,_PAGE_USER      /* User page ? */
+        ori     r11,r11,_PAGE_GUARDED   /* 440 errata, needs G set */
         beq     1f                      /* nope, leave U bits empty */
         rlwimi  r11,r11,3,26,28         /* yes, copy S bits to U */
 1:      tlbwe   r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 1a32db331a5c..2538030954d8 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -370,13 +370,10 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
         }
 
         /* XXX would be nice to have a way to ask for write-through */
-        prot |= _PAGE_NO_CACHE;
         if (write_combine)
-                prot &= ~_PAGE_GUARDED;
+                return pgprot_noncached_wc(prot);
         else
-                prot |= _PAGE_GUARDED;
-
-        return __pgprot(prot);
+                return pgprot_noncached(prot);
 }
 
 /*
@@ -387,19 +384,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 pgprot_t pci_phys_mem_access_prot(struct file *file,
                                   unsigned long pfn,
                                   unsigned long size,
-                                  pgprot_t protection)
+                                  pgprot_t prot)
 {
         struct pci_dev *pdev = NULL;
         struct resource *found = NULL;
-        unsigned long prot = pgprot_val(protection);
         resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
         int i;
 
         if (page_is_ram(pfn))
-                return __pgprot(prot);
-
-        prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+                return prot;
 
+        prot = pgprot_noncached(prot);
         for_each_pci_dev(pdev) {
                 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                         struct resource *rp = &pdev->resource[i];
@@ -420,14 +415,14 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
         }
         if (found) {
                 if (found->flags & IORESOURCE_PREFETCH)
-                        prot &= ~_PAGE_GUARDED;
+                        prot = pgprot_noncached_wc(prot);
                 pci_dev_put(pdev);
         }
 
         pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
-                 (unsigned long long)offset, prot);
+                 (unsigned long long)offset, pgprot_val(prot));
 
-        return __pgprot(prot);
+        return prot;
 }
 
 
@@ -583,8 +578,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
         pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
 
         vma->vm_pgoff = offset >> PAGE_SHIFT;
-        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
         return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                vma->vm_end - vma->vm_start,
                                vma->vm_page_prot);
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index c8eac22a8f00..28845604a10c 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -323,8 +323,8 @@ _GLOBAL(create_hpte)
         ori     r8,r8,0xe14             /* clear out reserved bits and M */
         andc    r8,r5,r8                /* PP = user? (rw&dirty? 2: 3): 0 */
 BEGIN_FTR_SECTION
-        ori     r8,r8,_PAGE_COHERENT    /* set M (coherence required) */
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+        rlwinm  r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
+END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
 #ifdef CONFIG_PTE_64BIT
         /* Put the XPN bits into the PTE */
         rlwimi  r8,r10,8,20,22
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8fee696fb795..53b06ebb3f2f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -102,8 +102,8 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
 
         if (!page_is_ram(pfn))
-                vma_prot = __pgprot(pgprot_val(vma_prot)
-                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
+                vma_prot = pgprot_noncached(vma_prot);
+
         return vma_prot;
 }
 EXPORT_SYMBOL(phys_mem_access_prot);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 1b26071a86ca..7106b63d401b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -273,12 +273,10 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 return VM_FAULT_NOPAGE;
 
         if (ctx->state == SPU_STATE_SAVED) {
-                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                             & ~_PAGE_NO_CACHE);
+                vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
                 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
         } else {
-                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                             | _PAGE_NO_CACHE);
+                vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
                 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
         }
         vm_insert_pfn(vma, address, pfn);
@@ -338,8 +336,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
                 return -EINVAL;
 
         vma->vm_flags |= VM_IO | VM_PFNMAP;
-        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                     | _PAGE_NO_CACHE);
+        vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 
         vma->vm_ops = &spufs_mem_mmap_vmops;
         return 0;
@@ -452,8 +449,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
                 return -EINVAL;
 
         vma->vm_flags |= VM_IO | VM_PFNMAP;
-        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
         vma->vm_ops = &spufs_cntl_mmap_vmops;
         return 0;
@@ -1155,8 +1151,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
                 return -EINVAL;
 
         vma->vm_flags |= VM_IO | VM_PFNMAP;
-        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
         vma->vm_ops = &spufs_signal1_mmap_vmops;
         return 0;
@@ -1292,8 +1287,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
                 return -EINVAL;
 
         vma->vm_flags |= VM_IO | VM_PFNMAP;
-        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
         vma->vm_ops = &spufs_signal2_mmap_vmops;
         return 0;
@@ -1414,8 +1408,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
                 return -EINVAL;
 
         vma->vm_flags |= VM_IO | VM_PFNMAP;
-        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
         vma->vm_ops = &spufs_mss_mmap_vmops;
         return 0;
@@ -1476,8 +1469,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
                 return -EINVAL;
 
         vma->vm_flags |= VM_IO | VM_PFNMAP;
-        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
         vma->vm_ops = &spufs_psmap_mmap_vmops;
         return 0;
@@ -1536,8 +1528,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
                 return -EINVAL;
 
         vma->vm_flags |= VM_IO | VM_PFNMAP;
-        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
         vma->vm_ops = &spufs_mfc_mmap_vmops;
         return 0;
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index b0be7eac32d8..49fcbe8f18ac 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -298,10 +298,10 @@ static int controlfb_mmap(struct fb_info *info,
                         return -EINVAL;
                 start = info->fix.mmio_start;
                 len = PAGE_ALIGN((start & ~PAGE_MASK)+info->fix.mmio_len);
-                pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE|_PAGE_GUARDED;
+                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
         } else {
                 /* framebuffer */
-                pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
+                vma->vm_page_prot = pgprot_cached_wthru(vma->vm_page_prot);
         }
         start &= PAGE_MASK;
         if ((vma->vm_end - vma->vm_start + off) > len)