Diffstat (limited to 'arch/sh/mm')
 arch/sh/mm/Makefile_32              |  6 +-----
 arch/sh/mm/cache-sh5.c              | 17 +++++++++++++++++
 arch/sh/mm/{pg-sh4.c => pg-mmu.c}   | 19 +++++++++++++------
 3 files changed, 31 insertions(+), 11 deletions(-)
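In short, as can be inferred from the hunks below: the SH-4/SH7705 user-page copy helpers move from the conditionally built pg-sh4.o into a generic pg-mmu.o that is always built for 32-bit MMU configurations, with cache-alias handling decided at run time via boot_cpu_data.dcache.n_aliases; SH-5 gains its own copy_to_user_page()/copy_from_user_page() implementations.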
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index 5c04bbb08d36..62e280734dcb 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -15,7 +15,7 @@ endif
 obj-y			+= $(cache-y)
 
 mmu-y			:= tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault_32.o tlbflush_32.o ioremap_32.o
+mmu-$(CONFIG_MMU)	:= fault_32.o tlbflush_32.o ioremap_32.o pg-mmu.o
 
 obj-y			+= $(mmu-y)
 obj-$(CONFIG_DEBUG_FS)	+= asids-debugfs.o
@@ -29,10 +29,6 @@ tlb-$(CONFIG_CPU_SH3)	:= tlb-sh3.o
 tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o
 tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o
 obj-y				+= $(tlb-y)
-ifndef CONFIG_CACHE_OFF
-obj-$(CONFIG_CPU_SH4)		+= pg-sh4.o
-obj-$(CONFIG_SH7705_CACHE_32KB)	+= pg-sh4.o
-endif
 endif
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
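(Note: with this change pg-mmu.o is built for every 32-bit CONFIG_MMU=y configuration; the old build-time selection via ifndef CONFIG_CACHE_OFF moves into the source itself, as a preprocessor guard in kmap_coherent_init() plus runtime boot_cpu_data.dcache.n_aliases checks, visible in the pg-mmu.c hunks below.)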
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 86762092508c..3e2d7321b636 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -831,4 +831,21 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 	else
 		sh64_clear_user_page_coloured(to, address);
 }
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, const void *src,
+		       unsigned long len)
+{
+	flush_cache_page(vma, vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
+	flush_icache_user_range(vma, page, vaddr, len);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+			 unsigned long vaddr, void *dst, const void *src,
+			 unsigned long len)
+{
+	flush_cache_page(vma, vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
+}
 #endif
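The SH-5 versions take a flush-based route rather than the kmap_coherent() one: flush_cache_page() before the copy, and flush_icache_user_range() afterwards in the copy_to_user_page() case so newly written instructions become visible. This is presumably because the 64-bit build does not pick up the generic pg-mmu.o, which (per the hunk above) is wired into Makefile_32 only.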
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-mmu.c (renamed from arch/sh/mm/pg-sh4.c)
index 4d93070b8220..356d2cdcb209 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-mmu.c
@@ -1,5 +1,5 @@
 /*
- * arch/sh/mm/pg-sh4.c
+ * arch/sh/mm/pg-mmu.c
  *
  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
  * Copyright (C) 2002 - 2009  Paul Mundt
@@ -22,11 +22,13 @@ static pte_t *kmap_coherent_pte;
 
 void __init kmap_coherent_init(void)
 {
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
 	unsigned long vaddr;
 
 	/* cache the first coherent kmap pte */
 	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
 	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+#endif
 }
 
 static inline void *kmap_coherent(struct page *page, unsigned long addr)
@@ -62,13 +64,15 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+	    !test_bit(PG_dcache_dirty, &page->flags)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent(vto);
 	} else {
 		memcpy(dst, src, len);
-		set_bit(PG_dcache_dirty, &page->flags);
+		if (boot_cpu_data.dcache.n_aliases)
+			set_bit(PG_dcache_dirty, &page->flags);
 	}
 
 	if (vma->vm_flags & VM_EXEC)
@@ -79,13 +83,15 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+	    !test_bit(PG_dcache_dirty, &page->flags)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent(vfrom);
 	} else {
 		memcpy(dst, src, len);
-		set_bit(PG_dcache_dirty, &page->flags);
+		if (boot_cpu_data.dcache.n_aliases)
+			set_bit(PG_dcache_dirty, &page->flags);
 	}
 }
 
@@ -96,7 +102,8 @@ void copy_user_highpage(struct page *to, struct page *from,
 
 	vto = kmap_atomic(to, KM_USER1);
 
-	if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
+	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
+	    !test_bit(PG_dcache_dirty, &from->flags)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent(vfrom);
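The recurring pattern in pg-mmu.c: if the D-cache can alias (n_aliases != 0) and the page has a user mapping whose cache lines may be live, copy through a congruent kernel mapping from kmap_coherent(); otherwise do a plain memcpy and, on aliasing CPUs, set PG_dcache_dirty so the flush can happen lazily later. Below is a minimal, self-contained user-space sketch of that dispatch; every identifier in it (model_page, map_congruent, the plain n_aliases variable) is a hypothetical stand-in for the kernel interfaces above, not the kernel API.

/*
 * User-space model of the aliasing-aware copy_to_user_page() path.
 * Build with: cc -std=c99 -Wall model.c
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096UL
#define MODEL_PAGE_MASK (~(MODEL_PAGE_SIZE - 1))

struct model_page {
	bool mapped;		/* models page_mapped(page) */
	bool dcache_dirty;	/* models PG_dcache_dirty */
	char data[MODEL_PAGE_SIZE];
};

/* models boot_cpu_data.dcache.n_aliases; 0 on non-aliasing parts */
static unsigned int n_aliases = 2;

/* stand-in for kmap_coherent(): a mapping congruent with uaddr */
static void *map_congruent(struct model_page *pg, unsigned long uaddr)
{
	(void)uaddr;	/* the real kernel picks an alias-safe fixmap slot */
	return pg->data;
}

static void copy_to_user_page_model(struct model_page *pg,
				    unsigned long uaddr, void *dst,
				    const void *src, size_t len)
{
	if (n_aliases && pg->mapped && !pg->dcache_dirty) {
		/* write through the congruent mapping: no stale alias */
		void *vto = (char *)map_congruent(pg, uaddr) +
			    (uaddr & ~MODEL_PAGE_MASK);
		memcpy(vto, src, len);
	} else {
		/* plain copy; defer the flush by marking the page dirty */
		memcpy(dst, src, len);
		if (n_aliases)
			pg->dcache_dirty = true;
	}
}

int main(void)
{
	struct model_page pg = { .mapped = true };
	unsigned long uaddr = 0x400123;	/* arbitrary user address */

	copy_to_user_page_model(&pg, uaddr,
				pg.data + (uaddr & ~MODEL_PAGE_MASK),
				"hello", 6);
	printf("%s\n", pg.data + (uaddr & ~MODEL_PAGE_MASK));
	return 0;
}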