| author | Stuart Menefy <stuart.menefy@st.com> | 2009-10-27 11:14:06 -0400 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2009-11-24 03:13:35 -0500 |
| commit | 39ac11c1607f1d566e7cf885acd403fa4f07f8a2 | |
| tree | a47e6bd8526742f9dfdc177253654e2cac5b829f /arch/sh | |
| parent | 49fb2cd2571e0134e5a12c5abab227696e4940c7 | |
sh: Improve performance of SH4 versions of copy/clear_user_highpage
The previous implementations of clear_user_highpage and copy_user_highpage
checked whether there was a D-cache aliasing issue between the user
and kernel mappings of a page; if there was, they always performed a
flush with writeback on the dirtied kernel alias.
However, as we now have the ability to map a page into kernel space
with the same cache colour as the user mapping, there is no need to
write back this data.
Currently we also invalidate the kernel alias as a precaution, although
I'm not sure whether this is actually required.
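As a rough illustration of the aliasing check being discussed, here is a minimal sketch of what "same cache colour" means on a VIPT D-cache. The constants are assumptions chosen for illustration (8 colours, matching FIX_N_COLOURS in the fixmap change below); this is not the kernel's pages_do_alias() implementation.

```c
/*
 * Illustrative sketch only -- not the kernel's aliasing check.
 * On a VIPT D-cache, two virtual mappings of the same physical page
 * land on different cache lines ("colours") when they differ in the
 * address bits just above PAGE_SHIFT that index the cache.
 */
#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define N_COLOURS	8			/* assumed, as FIX_N_COLOURS */
#define COLOUR_MASK	((unsigned long)(N_COLOURS - 1) << PAGE_SHIFT)

/* Non-zero when the two mappings would occupy different colours. */
static inline unsigned long mappings_alias(unsigned long kaddr,
					   unsigned long uaddr)
{
	return (kaddr ^ uaddr) & COLOUR_MASK;
}
```

When this returns zero, the kernel and user mappings index the same cache lines, so data written through the kernel alias is already where the user mapping will look for it and no writeback is needed.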
Also correct the definition of FIX_CMAP_END so that the mappings created
by kmap_coherent() are actually at the correct colour.
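Since fixmap indices map to virtual addresses downwards from FIXADDR_TOP, the inclusive "- 1" in the new FIX_CMAP_END is what lines the colours up. Below is a minimal standalone check of that arithmetic, assuming NR_CPUS == 1, 4 KiB pages, FIX_CMAP_BEGIN == 0, the generic __fix_to_virt() layout, and an illustrative colour-aligned FIXADDR_TOP; the concrete values are assumptions, not taken from the patch.

```c
#include <assert.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define FIX_N_COLOURS	8UL
#define NR_CPUS		1UL			/* assumed */
#define FIXADDR_TOP	0xdffff000UL		/* illustrative value */
#define FIX_CMAP_BEGIN	0UL			/* assumed first fixmap index */
#define FIX_CMAP_END	(FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1)

/* Generic fixmap layout: indices grow downwards from FIXADDR_TOP. */
#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

#define COLOUR(addr)	((addr) & ((FIX_N_COLOURS * PAGE_SIZE) - 1))

int main(void)
{
	unsigned long n;

	for (n = 0; n < FIX_N_COLOURS; n++) {
		/* kmap_coherent()'s requirement, per the new fixmap comment:
		 * __fix_to_virt(FIX_CMAP_END - n) must have the same colour
		 * as a page at (n << PAGE_SHIFT). */
		assert(COLOUR(__fix_to_virt(FIX_CMAP_END - n)) ==
		       COLOUR(n << PAGE_SHIFT));
	}
	return 0;
}
```

With the old, non-inclusive FIX_CMAP_END the same loop is off by one page and every assertion fails, which is exactly the mis-colouring the patch corrects.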
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh')

| File | Lines changed |
|---|---|
| arch/sh/include/asm/fixmap.h | 8 |
| arch/sh/mm/cache.c | 66 |

2 files changed, 60 insertions, 14 deletions
```diff
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index 76c5a3099cb8..5ac1e40a511c 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -46,9 +46,15 @@
  * fix-mapped?
  */
 enum fixed_addresses {
+	/*
+	 * The FIX_CMAP entries are used by kmap_coherent() to get virtual
+	 * addresses which are of a known color, and so their values are
+	 * important. __fix_to_virt(FIX_CMAP_END - n) must give an address
+	 * which is the same color as a page (n<<PAGE_SHIFT).
+	 */
 #define FIX_N_COLOURS 8
 	FIX_CMAP_BEGIN,
-	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS),
+	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
 	FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
```
```diff
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index e9415d3ea94a..997c7e42b1e1 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -46,6 +46,18 @@ static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
 	preempt_enable();
 }
 
+/*
+ * copy_to_user_page
+ * @vma: vm_area_struct holding the pages
+ * @page: struct page
+ * @vaddr: user space address
+ * @dst: address of page in kernel space (possibly from kmap)
+ * @src: source address in kernel logical memory
+ * @len: length of data in bytes (may be less than PAGE_SIZE)
+ *
+ * Copy data into the address space of a process other than the current
+ * process (eg for ptrace).
+ */
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
```
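The kernel-doc added above names ptrace as the typical user. As a hypothetical illustration of the documented calling convention (the helper name and the way @page and @vma are obtained are made up for the sketch; only the copy_to_user_page() call itself comes from the patch):

```c
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical caller: write @len bytes at user address @vaddr of a
 * page belonging to another process, e.g. on behalf of ptrace.
 * Looking up @vma and pinning @page (get_user_pages() etc.) is elided.
 */
static void poke_remote_page(struct vm_area_struct *vma, struct page *page,
			     unsigned long vaddr, const void *buf,
			     unsigned long len)
{
	unsigned long offset = vaddr & ~PAGE_MASK;
	void *maddr = kmap(page);	/* kernel alias of the page */

	/* @dst is the kernel-space address, @src the kernel buffer. */
	copy_to_user_page(vma, page, vaddr, maddr + offset, buf, len);

	set_page_dirty_lock(page);
	kunmap(page);
}
```

The remaining hunks of arch/sh/mm/cache.c, below, rework copy_user_highpage() and clear_user_highpage() themselves.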
```diff
@@ -81,28 +93,49 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 	}
 }
 
+/*
+ * copy_user_highpage
+ * @to: destination page
+ * @from: source page
+ * @vaddr: address of pages in user address space
+ * @vma: vm_area_struct holding the pages
+ *
+ * This is used in COW implementation to copy data from page @from to
+ * page @to. @from was previously mapped at @vaddr, and @to will be.
+ * As this is used only in the COW implementation, this means that the
+ * source is unmodified, and so we don't have to worry about cache
+ * aliasing on that side.
+ */
+#ifdef CONFIG_HIGHMEM
+/*
+ * If we ever have a real highmem system, this code will need fixing
+ * (as will clear_user/clear_user_highmem), because the kmap potentially
+ * creates another alias risk.
+ */
+#error This code is broken with real HIGHMEM
+#endif
 void copy_user_highpage(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *vfrom, *vto;
 
 	vto = kmap_atomic(to, KM_USER1);
+	vfrom = kmap_atomic(from, KM_USER0);
+
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+		__flush_invalidate_region(vto, PAGE_SIZE);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
 	    !test_bit(PG_dcache_dirty, &from->flags)) {
-		vfrom = kmap_coherent(from, vaddr);
+		void *vto_coloured = kmap_coherent(to, vaddr);
+		copy_page(vto_coloured, vfrom);
+		kunmap_coherent(vto_coloured);
+	} else
 		copy_page(vto, vfrom);
-		kunmap_coherent(vfrom);
-	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
-		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
-	}
-
-	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-		__flush_purge_region(vto, PAGE_SIZE);
 
+	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
+
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
@@ -112,10 +145,17 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
 	void *kaddr = kmap_atomic(page, KM_USER0);
 
-	clear_page(kaddr);
+	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) {
+		void *vto;
 
-	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
-		__flush_purge_region(kaddr, PAGE_SIZE);
+		/* Kernel alias may have modified data in the cache. */
+		__flush_invalidate_region(kaddr, PAGE_SIZE);
+
+		vto = kmap_coherent(page, vaddr);
+		clear_page(vto);
+		kunmap_coherent(vto);
+	} else
+		clear_page(kaddr);
 
 	kunmap_atomic(kaddr, KM_USER0);
 }
```