author     Paul Mundt <lethal@linux-sh.org>    2009-09-01 08:12:55 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2009-09-01 08:12:55 -0400
commit     983f4c514c4c9ddac1077a2c805fd16cbe3f7487 (patch)
tree       c0fac3c691139178c545ebe7a8f8eb642937f163 /arch/sh/mm/cache-sh4.c
parent     ade315d83c1d53b3c6b820134cb16601351810fe (diff)
Revert "sh: Kill off now redundant local irq disabling."
This reverts commit 64a6d72213dd810dd55bd0a503c36150af41c3c3.

Unfortunately we can't use on_each_cpu() for all of the cache ops, as
some of them only require preempt disabling. This seems to be the same
issue that impacts the mips r4k caches, where this code was based on.

This fixes up a deadlock that showed up in some IRQ context cases.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
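For context, the sequence this revert puts back in sh4_flush_icache_range() and flush_icache_all() is: save and disable local interrupts, jump to the uncached (P2) mapping, operate on the cache address arrays, return to the cached mapping, then restore interrupts. The stand-alone C sketch below only illustrates that ordering; every primitive in it (local_irq_save(), jump_to_uncached(), the cache-line helpers) is a stub that prints what the real SH-4 code would do, and the stub names (ocbwb_stub(), icache_inval_stub()) and the 32-byte line size are illustrative assumptions, not taken from the kernel sources.

/*
 * Stand-alone sketch of the ordering restored by this revert.
 * Everything below is a stub -- nothing here touches real hardware.
 */
#include <stdio.h>

#define L1_CACHE_BYTES 32	/* assumed SH-4 cache line size, for illustration */

/* Stub irq macros, mirroring only the kernel calling convention. */
#define local_irq_save(flags)     do { (flags) = 1; puts("irqs off"); } while (0)
#define local_irq_restore(flags)  do { (void)(flags); puts("irqs on"); } while (0)

static void jump_to_uncached(void) { puts("executing from uncached (P2) mapping"); }
static void back_to_cached(void)   { puts("back to cached (P1) mapping"); }
static void ocbwb_stub(unsigned long v)        { printf("ocbwb 0x%lx (write back D-cache line)\n", v); }
static void icache_inval_stub(unsigned long v) { printf("inval 0x%lx (clear I-cache valid bit)\n", v); }

static void flush_icache_range_sketch(unsigned long start, unsigned long end)
{
        unsigned long flags, v;

        /* Round the range out to cache-line boundaries. */
        start &= ~(L1_CACHE_BYTES - 1);
        end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);

        local_irq_save(flags);          /* restored by this revert */
        jump_to_uncached();

        for (v = start; v < end; v += L1_CACHE_BYTES) {
                ocbwb_stub(v);
                icache_inval_stub(v);
        }

        back_to_cached();
        local_irq_restore(flags);       /* restored by this revert */
}

int main(void)
{
        flush_icache_range_sketch(0x1000, 0x1080);
        return 0;
}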
Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
-rw-r--r--  arch/sh/mm/cache-sh4.c  61
1 file changed, 35 insertions(+), 26 deletions(-)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 70fb906419dd..3ac4945cb493 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -48,44 +48,48 @@ static void sh4_flush_icache_range(void *args)
         struct flusher_data *data = args;
         int icacheaddr;
         unsigned long start, end;
-        unsigned long v;
+        unsigned long flags, v;
         int i;
 
         start = data->addr1;
         end = data->addr2;
 
         /* If there are too many pages then just blow the caches */
         if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
                 local_flush_cache_all(args);
         } else {
                 /* selectively flush d-cache then invalidate the i-cache */
                 /* this is inefficient, so only use for small ranges */
                 start &= ~(L1_CACHE_BYTES-1);
                 end += L1_CACHE_BYTES-1;
                 end &= ~(L1_CACHE_BYTES-1);
 
+                local_irq_save(flags);
                 jump_to_uncached();
 
                 for (v = start; v < end; v+=L1_CACHE_BYTES) {
-                        __ocbwb(v);
+                        asm volatile("ocbwb %0"
+                                     : /* no output */
+                                     : "m" (__m(v)));
 
-                        icacheaddr = CACHE_IC_ADDRESS_ARRAY |
-                                        (v & cpu_data->icache.entry_mask);
+                        icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
+                                        v & cpu_data->icache.entry_mask);
 
                         for (i = 0; i < cpu_data->icache.ways;
                                 i++, icacheaddr += cpu_data->icache.way_incr)
                                         /* Clear i-cache line valid-bit */
                                         ctrl_outl(0, icacheaddr);
                 }
 
                 back_to_cached();
+                local_irq_restore(flags);
         }
 }
 
 static inline void flush_cache_4096(unsigned long start,
                                     unsigned long phys)
 {
-        unsigned long exec_offset = 0;
+        unsigned long flags, exec_offset = 0;
 
         /*
          * All types of SH-4 require PC to be in P2 to operate on the I-cache.
@@ -95,8 +99,10 @@ static inline void flush_cache_4096(unsigned long start,
             (start < CACHE_OC_ADDRESS_ARRAY))
                 exec_offset = 0x20000000;
 
+        local_irq_save(flags);
         __flush_cache_4096(start | SH_CACHE_ASSOC,
                            P1SEGADDR(phys), exec_offset);
+        local_irq_restore(flags);
 }
 
 /*
@@ -130,8 +136,9 @@ static void sh4_flush_dcache_page(void *arg)
 /* TODO: Selective icache invalidation through IC address array.. */
 static void __uses_jump_to_uncached flush_icache_all(void)
 {
-        unsigned long ccr;
+        unsigned long flags, ccr;
 
+        local_irq_save(flags);
         jump_to_uncached();
 
         /* Flush I-cache */
@@ -143,7 +150,9 @@ static void __uses_jump_to_uncached flush_icache_all(void)
          * back_to_cached() will take care of the barrier for us, don't add
          * another one!
          */
+
         back_to_cached();
+        local_irq_restore(flags);
 }
 
 static inline void flush_dcache_all(void)