Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/c-octeon.c    |  16
 arch/mips/mm/c-r4k.c       |  21
 arch/mips/mm/dma-default.c | 165
 arch/mips/mm/fault.c       |  11
 arch/mips/mm/highmem.c     |   3
 arch/mips/mm/sc-mips.c     |  38
 arch/mips/mm/tlbex.c       |  11
 arch/mips/mm/uasm.c        |  20
 8 files changed, 149 insertions(+), 136 deletions(-)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 0f9c488044d1..16c4d256b76f 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -181,10 +181,10 @@ static void __cpuinit probe_octeon(void)
 	unsigned int config1;
 	struct cpuinfo_mips *c = &current_cpu_data;
 
+	config1 = read_c0_config1();
 	switch (c->cputype) {
 	case CPU_CAVIUM_OCTEON:
 	case CPU_CAVIUM_OCTEON_PLUS:
-		config1 = read_c0_config1();
 		c->icache.linesz = 2 << ((config1 >> 19) & 7);
 		c->icache.sets = 64 << ((config1 >> 22) & 7);
 		c->icache.ways = 1 + ((config1 >> 16) & 7);
@@ -204,6 +204,20 @@ static void __cpuinit probe_octeon(void)
 		c->options |= MIPS_CPU_PREFETCH;
 		break;
 
+	case CPU_CAVIUM_OCTEON2:
+		c->icache.linesz = 2 << ((config1 >> 19) & 7);
+		c->icache.sets = 8;
+		c->icache.ways = 37;
+		c->icache.flags |= MIPS_CACHE_VTAG;
+		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
+
+		c->dcache.linesz = 128;
+		c->dcache.ways = 32;
+		c->dcache.sets = 8;
+		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+		c->options |= MIPS_CPU_PREFETCH;
+		break;
+
 	default:
 		panic("Unsupported Cavium Networks CPU type\n");
 		break;
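The first hunk hoists the Config1 read ahead of the switch so the new OCTEON II case added by the second hunk can decode its icache line size from the same register; the existing OCTEON/OCTEON+ case keeps using the value as before. For the fixed geometry in the new case, the computed sizes work out as follows (assuming the usual 128-byte OCTEON line size decoded from Config1):

	icache_size = 8 sets * 37 ways * 128 bytes/line = 37888 bytes
	dcache_size = 8 sets * 32 ways * 128 bytes/line = 32768 bytes (32 KiB)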
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6721ee2b1e8b..b4923a75cb4b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -42,14 +42,14 @@
  * o collapses to normal function call on UP kernels
  * o collapses to normal function call on systems with a single shared
  *   primary cache.
+ * o doesn't disable interrupts on the local CPU
  */
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int wait)
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 {
 	preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-	smp_call_function(func, info, wait);
+	smp_call_function(func, info, 1);
 #endif
 	func(info);
 	preempt_enable();
@@ -363,7 +363,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -410,7 +410,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	int exec = vma->vm_flags & VM_EXEC;
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
+		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -442,7 +442,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
 }
 
 struct flush_cache_page_args {
@@ -534,7 +534,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
 	args.addr = addr;
 	args.pfn = pfn;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -547,8 +547,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
 	if (in_atomic())
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
-		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-				1);
+		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
 }
 
 struct flush_icache_range_args {
@@ -589,7 +588,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
 	instruction_hazard();
 }
 
@@ -710,7 +709,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
 }
 
 static void r4k_flush_icache_all(void)
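Every caller of r4k_on_each_cpu() passed wait = 1, so the parameter carried no information; the helper now hard-codes a waiting smp_call_function() and every call site shrinks by one argument. The resulting helper, per the first hunk (a sketch of the post-patch code, not an additional change):

	static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
	{
		preempt_disable();
	#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
		smp_call_function(func, info, 1);	/* always wait for completion */
	#endif
		func(info);
		preempt_enable();
	}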
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 469d4019f795..4fc1a0fbe007 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -95,10 +95,9 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 
 	return ret;
 }
-
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
+static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
@@ -123,7 +122,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
@@ -131,10 +129,9 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
-
 EXPORT_SYMBOL(dma_free_noncoherent);
 
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
 {
 	unsigned long addr = (unsigned long) vaddr;
@@ -151,8 +148,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	free_pages(addr, get_order(size));
 }
 
-EXPORT_SYMBOL(dma_free_coherent);
-
 static inline void __dma_sync(unsigned long addr, size_t size,
 	enum dma_data_direction direction)
 {
@@ -174,21 +169,8 @@ static inline void __dma_sync(unsigned long addr, size_t size,
 	}
 }
 
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-	enum dma_data_direction direction)
-{
-	unsigned long addr = (unsigned long) ptr;
-
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(addr, size, direction);
-
-	return plat_map_dma_mem(dev, ptr, size);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-	enum dma_data_direction direction)
+static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	if (cpu_is_noncoherent_r10000(dev))
 		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
@@ -197,15 +179,11 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	plat_unmap_dma_mem(dev, dma_addr, size, direction);
 }
 
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction direction)
+static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	for (i = 0; i < nents; i++, sg++) {
 		unsigned long addr;
 
@@ -219,33 +197,27 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	return nents;
 }
 
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
+static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction direction,
+	struct dma_attrs *attrs)
 {
-	BUG_ON(direction == DMA_NONE);
+	unsigned long addr;
 
-	if (!plat_device_is_coherent(dev)) {
-		unsigned long addr;
+	addr = (unsigned long) page_address(page) + offset;
 
-		addr = (unsigned long) page_address(page) + offset;
+	if (!plat_device_is_coherent(dev))
 		__dma_sync(addr, size, direction);
-	}
 
-	return plat_map_dma_mem_page(dev, page) + offset;
+	return plat_map_dma_mem(dev, (void *)addr, size);
 }
 
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	enum dma_data_direction direction)
+static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	int nhwentries, enum dma_data_direction direction,
+	struct dma_attrs *attrs)
 {
 	unsigned long addr;
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	for (i = 0; i < nhwentries; i++, sg++) {
 		if (!plat_device_is_coherent(dev) &&
 		    direction != DMA_TO_DEVICE) {
@@ -257,13 +229,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	}
 }
 
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-	size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_cpu(struct device *dev,
+	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-
 	if (cpu_is_noncoherent_r10000(dev)) {
 		unsigned long addr;
 
@@ -272,13 +240,9 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-	size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_device(struct device *dev,
+	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-
 	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
@@ -288,46 +252,11 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	if (cpu_is_noncoherent_r10000(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr + offset, size, direction);
-	}
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	plat_extra_sync_for_device(dev);
-	if (!plat_device_is_coherent(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr + offset, size, direction);
-	}
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-	enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_cpu(struct device *dev,
+	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (cpu_is_noncoherent_r10000(dev))
@@ -336,15 +265,11 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-	enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (!plat_device_is_coherent(dev))
@@ -353,24 +278,18 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return plat_dma_mapping_error(dev, dma_addr);
 }
 
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_supported(struct device *dev, u64 mask)
+int mips_dma_supported(struct device *dev, u64 mask)
 {
 	return plat_dma_supported(dev, mask);
 }
 
-EXPORT_SYMBOL(dma_supported);
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	enum dma_data_direction direction)
+void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 
@@ -379,4 +298,30 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		__dma_sync((unsigned long)vaddr, size, direction);
 }
 
-EXPORT_SYMBOL(dma_cache_sync);
+static struct dma_map_ops mips_default_dma_map_ops = {
+	.alloc_coherent = mips_dma_alloc_coherent,
+	.free_coherent = mips_dma_free_coherent,
+	.map_page = mips_dma_map_page,
+	.unmap_page = mips_dma_unmap_page,
+	.map_sg = mips_dma_map_sg,
+	.unmap_sg = mips_dma_unmap_sg,
+	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
+	.sync_single_for_device = mips_dma_sync_single_for_device,
+	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
+	.sync_sg_for_device = mips_dma_sync_sg_for_device,
+	.mapping_error = mips_dma_mapping_error,
+	.dma_supported = mips_dma_supported
+};
+
+struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+EXPORT_SYMBOL(mips_dma_map_ops);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init mips_dma_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+	return 0;
+}
+fs_initcall(mips_dma_init);
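The net effect is to convert the MIPS DMA layer from directly exported dma_* entry points to a struct dma_map_ops instance: the generic dma-mapping wrappers now dispatch through the ops table, which is why dma_map_single(), the dma_sync_single_range_*() helpers, and most BUG_ON(direction == DMA_NONE) checks disappear; the common code supplies them. The new fs_initcall also preallocates 1 << 16 = 65536 dma-debug entries for CONFIG_DMA_API_DEBUG. A platform that needs different behaviour can now install its own table; a minimal sketch (my_plat_dma_map_ops and the initcall name are hypothetical, not part of this patch):

	extern struct dma_map_ops *mips_dma_map_ops;	/* exported above */
	static struct dma_map_ops my_plat_dma_map_ops;	/* hypothetical platform ops, fields filled in elsewhere */

	static int __init my_plat_dma_setup(void)
	{
		/* Replace the defaults before drivers start mapping. */
		mips_dma_map_ops = &my_plat_dma_map_ops;
		return 0;
	}
	arch_initcall(my_plat_dma_setup);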
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 783ad0065fdf..137ee76a0045 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/module.h>
 #include <linux/kprobes.h>
+#include <linux/perf_event.h>
 
 #include <asm/branch.h>
 #include <asm/mmu_context.h>
@@ -144,6 +145,7 @@ good_area:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -151,10 +153,15 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+	if (fault & VM_FAULT_MAJOR) {
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+			      1, 0, regs, address);
 		tsk->maj_flt++;
-	else
+	} else {
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+			      1, 0, regs, address);
 		tsk->min_flt++;
+	}
 
 	up_read(&mm->mmap_sem);
 	return;
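The fault path now feeds the perf software counters: one PERF_COUNT_SW_PAGE_FAULTS event per fault, plus a _MAJ or _MIN event depending on whether handle_mm_fault() reported VM_FAULT_MAJOR, mirroring the accounting the task's maj_flt/min_flt fields already received. With these hooks the standard perf software events become usable on MIPS, e.g. (workload name illustrative):

	perf stat -e page-faults,minor-faults,major-faults ./some_workload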
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 1e69b1fb4b85..3634c7ea06ac 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
 		return;
 	}
 
-	type = kmap_atomic_idx_pop();
+	type = kmap_atomic_idx();
 #ifdef CONFIG_DEBUG_HIGHMEM
 	{
 		int idx = type + KM_TYPE_NR * smp_processor_id();
@@ -89,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
 		local_flush_tlb_one(vaddr);
 	}
 #endif
+	kmap_atomic_idx_pop();
 	pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
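kmap_atomic_idx_pop() both returns the topmost kmap slot index and releases it, but the CONFIG_DEBUG_HIGHMEM block still needs that index while it validates and tears down the mapping. The fix splits the two steps: peek with kmap_atomic_idx() first, pop only once the slot is finished with. Abridged post-patch order:

	type = kmap_atomic_idx();	/* peek: slot stays live */
	#ifdef CONFIG_DEBUG_HIGHMEM
		/* idx/vaddr checks and local_flush_tlb_one() use 'type' here */
	#endif
	kmap_atomic_idx_pop();		/* now release the slot */
	pagefault_enable();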
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 5ab5fa8c1d82..505fecad4684 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -57,6 +57,38 @@ static struct bcache_ops mips_sc_ops = {
 	.bc_inv = mips_sc_inv
 };
 
+/*
+ * Check if the L2 cache controller is activated on a particular platform.
+ * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
+ * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the
+ * cache being disabled.  However there is no guarantee for this to be
+ * true on all platforms.  In an act of stupidity the spec defined bits
+ * 12..15 as implementation defined, so the function below will eventually
+ * have to be replaced by a platform-specific probe.
+ */
+static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
+{
+	unsigned int config2 = read_c0_config2();
+	unsigned int tmp;
+
+	/* Check the bypass bit (L2B) */
+	switch (c->cputype) {
+	case CPU_34K:
+	case CPU_74K:
+	case CPU_1004K:
+	case CPU_BMIPS5000:
+		if (config2 & (1 << 12))
+			return 0;
+	}
+
+	tmp = (config2 >> 4) & 0x0f;
+	if (0 < tmp && tmp <= 7)
+		c->scache.linesz = 2 << tmp;
+	else
+		return 0;
+	return 1;
+}
+
 static inline int __init mips_sc_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -79,10 +111,8 @@ static inline int __init mips_sc_probe(void)
 		return 0;
 
 	config2 = read_c0_config2();
-	tmp = (config2 >> 4) & 0x0f;
-	if (0 < tmp && tmp <= 7)
-		c->scache.linesz = 2 << tmp;
-	else
+
+	if (!mips_sc_is_activated(c))
 		return 0;
 
 	tmp = (config2 >> 8) & 0x0f;
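The line-size decode that used to live inline in mips_sc_probe() moves into the helper, next to the new L2B bypass test, so both "is the L2 present?" and "is it enabled?" are answered in one place. (As posted, the helper relied on the caller's config2/tmp locals and fell off the end without a return; the version above reads Config2 itself and returns 1 on success, matching what the function needs to compile.) The decode uses Config2 bits [7:4]: a value tmp in 1..7 means a secondary-cache line size of 2 << tmp bytes, anything else means no usable L2. For example, tmp = 5 gives 2 << 5 = 64-byte lines.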
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4510e61883eb..93816f3bca67 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -338,13 +338,12 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case CPU_4KSC:
 	case CPU_20KC:
 	case CPU_25KF:
-	case CPU_BCM3302:
-	case CPU_BCM4710:
+	case CPU_BMIPS32:
+	case CPU_BMIPS3300:
+	case CPU_BMIPS4350:
+	case CPU_BMIPS4380:
+	case CPU_BMIPS5000:
 	case CPU_LOONGSON2:
-	case CPU_BCM6338:
-	case CPU_BCM6345:
-	case CPU_BCM6348:
-	case CPU_BCM6358:
 	case CPU_R5500:
 		if (m4kc_tlbp_war())
 			uasm_i_nop(p);
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index d2647a4e012b..23afdebc8e5c 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -405,7 +405,6 @@ I_u1u2u3(_mfc0)
 I_u1u2u3(_mtc0)
 I_u2u1u3(_ori)
 I_u3u1u2(_or)
-I_u2s3u1(_pref)
 I_0(_rfe)
 I_u2s3u1(_sc)
 I_u2s3u1(_scd)
@@ -427,6 +426,25 @@ I_u1(_syscall);
 I_u1u2s3(_bbit0);
 I_u1u2s3(_bbit1);
 
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#include <asm/octeon/octeon.h>
+void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+			    unsigned int c)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
+		/*
+		 * As per erratum Core-14449, replace prefetches 0-4,
+		 * 6-24 with 'pref 28'.
+		 */
+		build_insn(buf, insn_pref, c, 28, b);
+	else
+		build_insn(buf, insn_pref, c, a, b);
+}
+UASM_EXPORT_SYMBOL(uasm_i_pref);
+#else
+I_u2s3u1(_pref)
+#endif
+
 /* Handle labels. */
 void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
 {
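On OCTEON kernels the generic I_u2s3u1(_pref) builder is replaced by an open-coded uasm_i_pref() so that, on CN63XX pass 1 parts, prefetch hints 0-4 and 6-24 are silently rewritten to hint 28 per erratum Core-14449; hint 5 and hints above 24 are emitted as requested. Callers are unchanged. A usage sketch (buffer variable and operand values illustrative; register 4 is $a0):

	u32 *p = handler_buf;		/* hypothetical emit buffer */
	uasm_i_pref(&p, 0, 0, 4);	/* pref 0,0($a0); becomes pref 28,0($a0) on CN63XX pass 1 */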