author		Ralf Baechle <ralf@linux-mips.org>	2006-10-21 18:17:35 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2006-10-21 18:17:35 -0400
commit		f8829caee311207afbc882794bdc5aa0db5caf33
tree		dbadd9fa746a1a4f091bc7e240ca8d787188a913 /arch/mips/mm/init.c
parent		224dc50ece1b40f8cff5ecadd42a6b2691e231de
[MIPS] Fix aliasing bug in copy_to_user_page / copy_from_user_page
The current implementation uses a cache flush followed by a copy. This
sequence is racy with a multithreaded debuggee and renders GDB virtually
unusable (both copy sequences are sketched after the sign-offs below).
Aside from that, this fixes a performance hog that made access to
/proc/cmdline very slow and caused enough cache stalls under the 34K AP/SP
programming model for the bare-metal code on the non-Linux VPE to miss RT
deadlines.
The main part of this patch was originally written by Ralf Baechle;
Atsushi Nemoto did the debugging.
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
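
For context, a minimal sketch of the two copy paths in copy_to_user_page().
The "after" half is lifted from the patch below; the "before" half
paraphrases the old flush-then-copy sequence rather than quoting the
removed code:

	/*
	 * Before (paraphrased): flush the page's cache lines, then copy
	 * through the kernel's own mapping of the page.  With an
	 * aliasing D-cache, another thread of the debuggee can dirty the
	 * lines again between the two steps, so the write can land in a
	 * stale alias -- the race this patch removes.
	 */
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);

	/*
	 * After: map the page at a kernel virtual address whose cache
	 * colour matches the user address (vaddr is the hint), so the
	 * copy itself goes through a coherent alias and there is no
	 * flush/copy window to race with.
	 */
	void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(vto, src, len);
	kunmap_coherent(page);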
Diffstat (limited to 'arch/mips/mm/init.c')
 arch/mips/mm/init.c | 167 +++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 160 insertions(+), 7 deletions(-)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 88b72c9a8495..2de4d3c367a2 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -30,11 +30,34 @@
 #include <asm/cachectl.h>
 #include <asm/cpu.h>
 #include <asm/dma.h>
+#include <asm/kmap_types.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
+
+/* Atomicity and interruptability */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+	{ \
+	unsigned int mvpflags; \
+	local_irq_save(flags); \
+	mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+	evpe(mvpflags); \
+	local_irq_restore(flags); \
+	}
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
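A note on the ENTER_CRITICAL/EXIT_CRITICAL pair added above: in the SMTC
case the opening brace lives in one macro and the closing brace in the
other, so the two must always be paired within the same function. Expanded
at a call site, a critical section looks roughly like this (illustrative
expansion, not code from the patch):

	unsigned long flags;

	/* ENTER_CRITICAL(flags); expands (SMTC case) to: */
	{
		unsigned int mvpflags;
		local_irq_save(flags);
		mvpflags = dvpe();	/* halt the other virtual processing elements */

		/* ...protected cp0/TLB updates go here... */

	/* EXIT_CRITICAL(flags); expands to: */
		evpe(mvpflags);		/* restart the other VPEs */
		local_irq_restore(flags);
	}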
@@ -80,13 +103,142 @@ unsigned long setup_zero_pages(void)
 	return 1UL << order;
 }
 
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
+/*
+ * These are almost like kmap_atomic / kunmap_atomic except they take an
+ * additional address argument as the hint.
+ */
 
 #define kmap_get_fixmap_pte(vaddr)					\
 	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
 
+#ifdef CONFIG_MIPS_MT_SMTC
+static pte_t *kmap_coherent_pte;
+static void __init kmap_coherent_init(void)
+{
+	unsigned long vaddr;
+
+	/* cache the first coherent kmap pte */
+	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
+	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+}
+#else
+static inline void kmap_coherent_init(void) {}
+#endif
+
+static inline void *kmap_coherent(struct page *page, unsigned long addr)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr, flags, entrylo;
+	unsigned long old_ctx;
+	pte_t pte;
+	int tlbidx;
+
+	inc_preempt_count();
+	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+#ifdef CONFIG_MIPS_MT_SMTC
+	idx += FIX_N_COLOURS * smp_processor_id();
+#endif
+	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+	pte = mk_pte(page, PAGE_KERNEL);
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+	entrylo = pte.pte_high;
+#else
+	entrylo = pte_val(pte) >> 6;
+#endif
+
+	ENTER_CRITICAL(flags);
+	old_ctx = read_c0_entryhi();
+	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
+	write_c0_entrylo0(entrylo);
+	write_c0_entrylo1(entrylo);
+#ifdef CONFIG_MIPS_MT_SMTC
+	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+	/* preload TLB instead of local_flush_tlb_one() */
+	mtc0_tlbw_hazard();
+	tlb_probe();
+	tlb_probe_hazard();
+	tlbidx = read_c0_index();
+	mtc0_tlbw_hazard();
+	if (tlbidx < 0)
+		tlb_write_random();
+	else
+		tlb_write_indexed();
+#else
+	tlbidx = read_c0_wired();
+	write_c0_wired(tlbidx + 1);
+	write_c0_index(tlbidx);
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+#endif
+	tlbw_use_hazard();
+	write_c0_entryhi(old_ctx);
+	EXIT_CRITICAL(flags);
+
+	return (void*) vaddr;
+}
+
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+static inline void kunmap_coherent(struct page *page)
+{
+#ifndef CONFIG_MIPS_MT_SMTC
+	unsigned int wired;
+	unsigned long flags, old_ctx;
+
+	ENTER_CRITICAL(flags);
+	old_ctx = read_c0_entryhi();
+	wired = read_c0_wired() - 1;
+	write_c0_wired(wired);
+	write_c0_index(wired);
+	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+	tlbw_use_hazard();
+	write_c0_entryhi(old_ctx);
+	EXIT_CRITICAL(flags);
+#endif
+	dec_preempt_count();
+	preempt_check_resched();
+}
+
+void copy_to_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len)
+{
+	if (cpu_has_dc_aliases) {
+		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(vto, src, len);
+		kunmap_coherent(page);
+	} else
+		memcpy(dst, src, len);
+	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
+		flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+EXPORT_SYMBOL(copy_to_user_page);
+
+void copy_from_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len)
+{
+	if (cpu_has_dc_aliases) {
+		void *vfrom =
+			kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(dst, vfrom, len);
+		kunmap_coherent(page);
+	} else
+		memcpy(dst, src, len);
+}
+
+EXPORT_SYMBOL(copy_from_user_page);
+
+
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
 static void __init kmap_init(void)
 {
 	unsigned long kmap_vstart;
@@ -97,11 +249,12 @@ static void __init kmap_init(void)
 
 	kmap_prot = PAGE_KERNEL;
 }
+#endif /* CONFIG_HIGHMEM */
 
-#ifdef CONFIG_32BIT
 void __init fixrange_init(unsigned long start, unsigned long end,
 	pgd_t *pgd_base)
 {
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -122,7 +275,7 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 		for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 			if (pmd_none(*pmd)) {
 				pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-				set_pmd(pmd, __pmd(pte));
+				set_pmd(pmd, __pmd((unsigned long)pte));
 				if (pte != pte_offset_kernel(pmd, 0))
 					BUG();
 			}
@@ -132,9 +285,8 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 		}
 		j = 0;
 	}
+#endif
 }
-#endif /* CONFIG_32BIT */
-#endif /* CONFIG_HIGHMEM */
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 extern void pagetable_init(void);
@@ -175,6 +327,7 @@ void __init paging_init(void)
 #ifdef CONFIG_HIGHMEM
 	kmap_init();
 #endif
+	kmap_coherent_init();
 
 	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 	low = max_low_pfn;
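
For reference, the two exports added above are consumed by the ptrace path,
which is how GDB reads and writes a debuggee's memory. A condensed sketch of
the access_process_vm() call site in mm/memory.c of this era (simplified,
with locking and error handling omitted; treat the exact shape as an
assumption):

	/* One remote page at a time; PTRACE_PEEKTEXT/POKETEXT end up here. */
	maddr = kmap(page);
	if (write) {
		copy_to_user_page(vma, page, addr,
				  maddr + offset, buf, bytes);
		set_page_dirty_lock(page);
	} else {
		copy_from_user_page(vma, page, addr,
				    buf, maddr + offset, bytes);
	}
	kunmap(page);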