Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/include/asm/fixmap.h	 8
-rw-r--r--	arch/sh/mm/cache.c	66
2 files changed, 60 insertions(+), 14 deletions(-)
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index 76c5a3099cb8..5ac1e40a511c 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -46,9 +46,15 @@
  * fix-mapped?
  */
 enum fixed_addresses {
+	/*
+	 * The FIX_CMAP entries are used by kmap_coherent() to get virtual
+	 * addresses which are of a known color, and so their values are
+	 * important. __fix_to_virt(FIX_CMAP_END - n) must give an address
+	 * which is the same color as a page (n<<PAGE_SHIFT).
+	 */
 #define FIX_N_COLOURS 8
 	FIX_CMAP_BEGIN,
-	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS),
+	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
 	FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
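The comment added here states the invariant that the new "- 1" on FIX_CMAP_END exists to preserve. On the usual downward-growing fixmap, where __fix_to_virt(x) expands to FIXADDR_TOP - (x << PAGE_SHIFT), the invariant can be encoded as an assertion; the following is a minimal sketch, not part of the patch, and check_cmap_colour() and COLOUR_MASK are illustrative names only:

#include <linux/bug.h>
#include <asm/fixmap.h>
#include <asm/page.h>

/* Mask selecting the cache-colour bits of a virtual address
 * (assumes FIX_N_COLOURS consecutive page colours). */
#define COLOUR_MASK	((FIX_N_COLOURS - 1) << PAGE_SHIFT)

/* Assert the documented invariant for colour index n. */
static inline void check_cmap_colour(unsigned long n)
{
	unsigned long vaddr = __fix_to_virt(FIX_CMAP_END - n);

	/* Must share a colour with a page at offset n << PAGE_SHIFT. */
	BUG_ON((vaddr & COLOUR_MASK) != ((n << PAGE_SHIFT) & COLOUR_MASK));
}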
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index e9415d3ea94a..997c7e42b1e1 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -46,6 +46,18 @@ static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
 	preempt_enable();
 }
 
+/*
+ * copy_to_user_page
+ * @vma: vm_area_struct holding the pages
+ * @page: struct page
+ * @vaddr: user space address
+ * @dst: address of page in kernel space (possibly from kmap)
+ * @src: source address in kernel logical memory
+ * @len: length of data in bytes (may be less than PAGE_SIZE)
+ *
+ * Copy data into the address space of a process other than the current
+ * process (eg for ptrace).
+ */
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
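The kernel-doc-style comment documents an existing contract rather than changing it: copy_to_user_page() is the hook paths such as ptrace use to write into another process's address space while keeping VIPT caches coherent. A caller looks roughly like the following simplified sketch, modelled on the core of access_process_vm(); poke_remote() is a hypothetical name, and the get_user_pages() lookup is assumed to have happened already:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Write @len bytes at user address @vaddr of a remote task, given
 * the page and vma already pinned and looked up. */
static int poke_remote(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, const void *buf, int len)
{
	void *maddr = kmap(page);

	/* Copies the data and performs whatever cache maintenance is
	 * needed so the target sees it through its mapping at @vaddr. */
	copy_to_user_page(vma, page, vaddr,
			  maddr + (vaddr & ~PAGE_MASK), buf, len);
	kunmap(page);
	set_page_dirty_lock(page);
	return len;
}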
@@ -81,28 +93,49 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 	}
 }
 
+/*
+ * copy_user_highpage
+ * @to: destination page
+ * @from: source page
+ * @vaddr: address of pages in user address space
+ * @vma: vm_area_struct holding the pages
+ *
+ * This is used in COW implementation to copy data from page @from to
+ * page @to. @from was previously mapped at @vaddr, and @to will be.
+ * As this is used only in the COW implementation, this means that the
+ * source is unmodified, and so we don't have to worry about cache
+ * aliasing on that side.
+ */
+#ifdef CONFIG_HIGHMEM
+/*
+ * If we ever have a real highmem system, this code will need fixing
+ * (as will clear_user/clear_user_highmem), because the kmap potentially
+ * creates another alias risk.
+ */
+#error This code is broken with real HIGHMEM
+#endif
 void copy_user_highpage(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *vfrom, *vto;
 
 	vto = kmap_atomic(to, KM_USER1);
+	vfrom = kmap_atomic(from, KM_USER0);
+
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+		__flush_invalidate_region(vto, PAGE_SIZE);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
 	    !test_bit(PG_dcache_dirty, &from->flags)) {
-		vfrom = kmap_coherent(from, vaddr);
+		void *vto_coloured = kmap_coherent(to, vaddr);
+		copy_page(vto_coloured, vfrom);
+		kunmap_coherent(vto_coloured);
+	} else
 		copy_page(vto, vfrom);
-		kunmap_coherent(vfrom);
-	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
-		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
-	}
-
-	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-		__flush_purge_region(vto, PAGE_SIZE);
 
+	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
+
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
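The rewritten body leans on kmap_coherent(), which consumes the FIX_CMAP slots adjusted in fixmap.h above: it maps the destination page at a fixmap address whose cache colour matches @vaddr, so the copy primes exactly the cache lines the eventual user mapping will use, and the plain kernel alias vto only needs a cheap invalidate beforehand instead of a write-back purge afterwards. In outline it works like the following hedged sketch; see the sh mm code for the real implementation, which also installs the pte and disables pagefaults:

#include <linux/smp.h>
#include <asm/fixmap.h>

/* Sketch of the colour-matching slot selection in kmap_coherent(). */
static void *kmap_coherent_sketch(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;

	/* Pick this CPU's FIX_CMAP slot with the same colour as @addr;
	 * relies on __fix_to_virt(FIX_CMAP_END - n) having colour n. */
	idx = FIX_CMAP_END -
	      (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
	       FIX_N_COLOURS * smp_processor_id());

	/* The real code installs a pte for @page here via set_pte(). */
	return (void *)__fix_to_virt(idx);
}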
@@ -112,10 +145,17 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
112{ 145{
113 void *kaddr = kmap_atomic(page, KM_USER0); 146 void *kaddr = kmap_atomic(page, KM_USER0);
114 147
115 clear_page(kaddr); 148 if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) {
149 void *vto;
116 150
117 if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) 151 /* Kernel alias may have modified data in the cache. */
118 __flush_purge_region(kaddr, PAGE_SIZE); 152 __flush_invalidate_region(kaddr, PAGE_SIZE);
153
154 vto = kmap_coherent(page, vaddr);
155 clear_page(vto);
156 kunmap_coherent(vto);
157 } else
158 clear_page(kaddr);
119 159
120 kunmap_atomic(kaddr, KM_USER0); 160 kunmap_atomic(kaddr, KM_USER0);
121} 161}
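Both functions make the same trade: __flush_purge_region() writes dirty lines back to memory before invalidating them, while __flush_invalidate_region() simply discards them. Discarding is safe here because whatever the kernel alias cached is either stale or about to be rewritten through the coloured mapping, so the write-back would be wasted bus traffic. The assumed contracts of the two sh primitives (declared in arch/sh/include/asm/cacheflush.h) are:

/* Write any dirty lines in [start, start + size) back to memory,
 * then invalidate them: required when the cached data must survive. */
extern void __flush_purge_region(void *start, int size);

/* Invalidate the lines without writing them back: cheaper, and
 * sufficient when the cached data is stale or about to be rewritten. */
extern void __flush_invalidate_region(void *start, int size);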