author	Atsushi Nemoto <anemo@mba.ocn.ne.jp>	2006-09-01 11:43:07 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2006-10-01 18:16:58 -0400
commit	c59a0f15be6e586aa0fe1fb5c7f740005c36ec56 (patch)
tree	868306f72db9b8113345b2d1eb810323805ca0c3
parent	1a6183f2e6f0fa2d1898f0228559df15a89a1ffe (diff)
[MIPS] Remove __flush_icache_page
__flush_icache_page is unused, so kill it.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
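For context: the generic flush_icache_page() inline in include/asm-mips/cacheflush.h never calls the __flush_icache_page hook (the last hunk below only drops the stale extern declaration in front of it), which is why every per-CPU implementation and the function pointer itself can be removed. A minimal sketch of the inline that remains after the patch; the empty body is an assumption, since the diff shows only its declaration:

	static inline void flush_icache_page(struct vm_area_struct *vma,
		struct page *page)
	{
		/* Nothing to do here: I-cache/D-cache coherence for user pages
		 * is presumably handled lazily elsewhere, not per call. */
	}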
-rw-r--r--	arch/mips/mm/c-r3k.c	21
-rw-r--r--	arch/mips/mm/c-r4k.c	77
-rw-r--r--	arch/mips/mm/c-sb1.c	61
-rw-r--r--	arch/mips/mm/c-tx39.c	29
-rw-r--r--	arch/mips/mm/cache.c	1
-rw-r--r--	include/asm-mips/cacheflush.h	2
6 files changed, 0 insertions, 191 deletions
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index e1f35ef81145..d1af42c2a52e 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -268,26 +268,6 @@ static void r3k_flush_data_cache_page(unsigned long addr)
 {
 }
 
-static void r3k_flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long physpage;
-
-	if (cpu_context(smp_processor_id(), mm) == 0)
-		return;
-
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-
-#ifdef DEBUG_CACHE
-	printk("cpage[%d,%08lx]", cpu_context(smp_processor_id(), mm), page);
-#endif
-
-	physpage = (unsigned long) page_address(page);
-	if (physpage)
-		r3k_flush_icache_range(physpage, physpage + PAGE_SIZE);
-}
-
 static void r3k_flush_cache_sigtramp(unsigned long addr)
 {
 	unsigned long flags;
@@ -335,7 +315,6 @@ void __init r3k_cache_init(void)
 	flush_cache_mm = r3k_flush_cache_mm;
 	flush_cache_range = r3k_flush_cache_range;
 	flush_cache_page = r3k_flush_cache_page;
-	__flush_icache_page = r3k_flush_icache_page;
 	flush_icache_range = r3k_flush_icache_range;
 
 	flush_cache_sigtramp = r3k_flush_cache_sigtramp;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 0b2da53750bd..cc895dad71d2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -551,82 +551,6 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	instruction_hazard();
 }
 
-/*
- * Ok, this seriously sucks. We use them to flush a user page but don't
- * know the virtual address, so we have to blast away the whole icache
- * which is significantly more expensive than the real thing. Otoh we at
- * least know the kernel address of the page so we can flush it
- * selectivly.
- */
-
-struct flush_icache_page_args {
-	struct vm_area_struct *vma;
-	struct page *page;
-};
-
-static inline void local_r4k_flush_icache_page(void *args)
-{
-	struct flush_icache_page_args *fip_args = args;
-	struct vm_area_struct *vma = fip_args->vma;
-	struct page *page = fip_args->page;
-
-	/*
-	 * Tricky ... Because we don't know the virtual address we've got the
-	 * choice of either invalidating the entire primary and secondary
-	 * caches or invalidating the secondary caches also. With the subset
-	 * enforcment on R4000SC, R4400SC, R10000 and R12000 invalidating the
-	 * secondary cache will result in any entries in the primary caches
-	 * also getting invalidated which hopefully is a bit more economical.
-	 */
-	if (cpu_has_inclusive_pcaches) {
-		unsigned long addr = (unsigned long) page_address(page);
-
-		r4k_blast_scache_page(addr);
-		ClearPageDcacheDirty(page);
-
-		return;
-	}
-
-	if (!cpu_has_ic_fills_f_dc) {
-		unsigned long addr = (unsigned long) page_address(page);
-		r4k_blast_dcache_page(addr);
-		if (!cpu_icache_snoops_remote_store)
-			r4k_blast_scache_page(addr);
-		ClearPageDcacheDirty(page);
-	}
-
-	/*
-	 * We're not sure of the virtual address(es) involved here, so
-	 * we have to flush the entire I-cache.
-	 */
-	if (cpu_has_vtag_icache && vma->vm_mm == current->active_mm) {
-		int cpu = smp_processor_id();
-
-		if (cpu_context(cpu, vma->vm_mm) != 0)
-			drop_mmu_context(vma->vm_mm, cpu);
-	} else
-		r4k_blast_icache();
-}
-
-static void r4k_flush_icache_page(struct vm_area_struct *vma,
-	struct page *page)
-{
-	struct flush_icache_page_args args;
-
-	/*
-	 * If there's no context yet, or the page isn't executable, no I-cache
-	 * flush is needed.
-	 */
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-
-	args.vma = vma;
-	args.page = page;
-
-	r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
-}
-
-
 #ifdef CONFIG_DMA_NONCOHERENT
 
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
@@ -1291,7 +1215,6 @@ void __init r4k_cache_init(void)
 	__flush_cache_all = r4k___flush_cache_all;
 	flush_cache_mm = r4k_flush_cache_mm;
 	flush_cache_page = r4k_flush_cache_page;
-	__flush_icache_page = r4k_flush_icache_page;
 	flush_cache_range = r4k_flush_cache_range;
 
 	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 16bad7c0a63f..5537558f19f7 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -307,66 +307,6 @@ void sb1_flush_icache_range(unsigned long start, unsigned long end)
 #endif
 
 /*
- * Flush the icache for a given physical page. Need to writeback the
- * dcache first, then invalidate the icache. If the page isn't
- * executable, nothing is required.
- */
-static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
-	struct page *page)
-{
-	unsigned long start;
-	int cpu = smp_processor_id();
-
-#ifndef CONFIG_SMP
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-#endif
-
-	/* Need to writeback any dirty data for that page, we have the PA */
-	start = (unsigned long)(page-mem_map) << PAGE_SHIFT;
-	__sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);
-	/*
-	 * If there's a context, bump the ASID (cheaper than a flush,
-	 * since we don't know VAs!)
-	 */
-	if (vma->vm_mm == current->active_mm) {
-		if (cpu_context(cpu, vma->vm_mm) != 0)
-			drop_mmu_context(vma->vm_mm, cpu);
-	} else
-		__sb1_flush_icache_range(start, start + PAGE_SIZE);
-
-}
-
-#ifdef CONFIG_SMP
-struct flush_icache_page_args {
-	struct vm_area_struct *vma;
-	struct page *page;
-};
-
-static void sb1_flush_icache_page_ipi(void *info)
-{
-	struct flush_icache_page_args *args = info;
-	local_sb1_flush_icache_page(args->vma, args->page);
-}
-
-/* Dirty dcache could be on another CPU, so do the IPIs */
-static void sb1_flush_icache_page(struct vm_area_struct *vma,
-	struct page *page)
-{
-	struct flush_icache_page_args args;
-
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-	args.vma = vma;
-	args.page = page;
-	on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
-}
-#else
-void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page)
-	__attribute__((alias("local_sb1_flush_icache_page")));
-#endif
-
-/*
  * A signal trampoline must fit into a single cacheline.
  */
 static void local_sb1_flush_cache_sigtramp(unsigned long addr)
@@ -526,7 +466,6 @@ void sb1_cache_init(void)
 
 	/* These routines are for Icache coherence with the Dcache */
 	flush_icache_range = sb1_flush_icache_range;
-	__flush_icache_page = sb1_flush_icache_page;
 	flush_icache_all = __sb1_flush_icache_all; /* local only */
 
 	/* This implies an Icache flush too, so can't be nop'ed */
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 932a09d7ef84..f32ebde30ccf 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -248,33 +248,6 @@ static void tx39_flush_icache_range(unsigned long start, unsigned long end)
 	}
 }
 
-/*
- * Ok, this seriously sucks. We use them to flush a user page but don't
- * know the virtual address, so we have to blast away the whole icache
- * which is significantly more expensive than the real thing. Otoh we at
- * least know the kernel address of the page so we can flush it
- * selectivly.
- */
-static void tx39_flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	unsigned long addr;
-	/*
-	 * If there's no context yet, or the page isn't executable, no icache
-	 * flush is needed.
-	 */
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-
-	addr = (unsigned long) page_address(page);
-	tx39_blast_dcache_page(addr);
-
-	/*
-	 * We're not sure of the virtual address(es) involved here, so
-	 * we have to flush the entire I-cache.
-	 */
-	tx39_blast_icache();
-}
-
 static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
 	unsigned long end;
@@ -382,7 +355,6 @@ void __init tx39_cache_init(void)
 	flush_cache_mm = (void *) tx39h_flush_icache_all;
 	flush_cache_range = (void *) tx39h_flush_icache_all;
 	flush_cache_page = (void *) tx39h_flush_icache_all;
-	__flush_icache_page = (void *) tx39h_flush_icache_all;
 	flush_icache_range = (void *) tx39h_flush_icache_all;
 
 	flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
@@ -408,7 +380,6 @@ void __init tx39_cache_init(void)
 	flush_cache_mm = tx39_flush_cache_mm;
 	flush_cache_range = tx39_flush_cache_range;
 	flush_cache_page = tx39_flush_cache_page;
-	__flush_icache_page = tx39_flush_icache_page;
 	flush_icache_range = tx39_flush_icache_range;
 
 	flush_cache_sigtramp = tx39_flush_cache_sigtramp;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 40c8b0235183..caf807ded514 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -25,7 +25,6 @@ void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
 void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
 	unsigned long pfn);
 void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*__flush_icache_page)(struct vm_area_struct *vma, struct page *page);
 
 /* MIPS specific cache operations */
 void (*flush_cache_sigtramp)(unsigned long addr);
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index 36416fdfcf68..9ab59e2bb233 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -46,8 +46,6 @@ static inline void flush_dcache_page(struct page *page)
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
 
-extern void (*__flush_icache_page)(struct vm_area_struct *vma,
-	struct page *page);
 static inline void flush_icache_page(struct vm_area_struct *vma,
 	struct page *page)
 {