author    Paul Mundt <lethal@linux-sh.org>    2009-09-01 08:12:55 -0400
committer Paul Mundt <lethal@linux-sh.org>    2009-09-01 08:12:55 -0400
commit    983f4c514c4c9ddac1077a2c805fd16cbe3f7487 (patch)
tree      c0fac3c691139178c545ebe7a8f8eb642937f163 /arch/sh
parent    ade315d83c1d53b3c6b820134cb16601351810fe (diff)
Revert "sh: Kill off now redundant local irq disabling."
This reverts commit 64a6d72213dd810dd55bd0a503c36150af41c3c3.

Unfortunately we can't use on_each_cpu() for all of the cache ops, as some of them only require preempt disabling. This seems to be the same issue that impacts the mips r4k caches, where this code was based on. This fixes up a deadlock that showed up in some IRQ context cases.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
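As a rough illustration of the pattern this revert restores (a minimal sketch, not the actual kernel code: the function name sh_flush_icache_range_local and the elided loop body are placeholders, while local_irq_save()/local_irq_restore() and jump_to_uncached()/back_to_cached() are the helpers used in the patch below): each flush handler masks interrupts on the local CPU around its uncached window itself, rather than relying on an on_each_cpu() dispatch for exclusion, which is what deadlocked when these ops were reached from IRQ context.

    /* Hypothetical sketch of the restored per-CPU flush pattern. */
    static void sh_flush_icache_range_local(void *args)
    {
        unsigned long flags;

        local_irq_save(flags);      /* critical section on this CPU only */
        jump_to_uncached();         /* run from the uncached mapping so the
                                       cache address arrays can be written */

        /* ... walk the range and write the I-/D-cache address arrays ... */

        back_to_cached();
        local_irq_restore(flags);
    }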
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/mm/cache-sh2a.c    |  6
-rw-r--r--  arch/sh/mm/cache-sh4.c     | 61
-rw-r--r--  arch/sh/mm/cache-sh5.c     | 29
-rw-r--r--  arch/sh/mm/cache-sh7705.c  |  8
4 files changed, 71 insertions(+), 33 deletions(-)
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index d783361e3f0a..975899d83564 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -102,10 +102,12 @@ static void sh2a_flush_icache_range(void *args)
 	struct flusher_data *data = args;
 	unsigned long start, end;
 	unsigned long v;
+	unsigned long flags;
 
 	start = data->addr1 & ~(L1_CACHE_BYTES-1);
 	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
 
+	local_irq_save(flags);
 	jump_to_uncached();
 
 	for (v = start; v < end; v+=L1_CACHE_BYTES) {
@@ -120,10 +122,12 @@ static void sh2a_flush_icache_range(void *args)
 			}
 		}
 		/* I-Cache invalidate */
-		ctrl_outl(addr, CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
+		ctrl_outl(addr,
+			  CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
 	}
 
 	back_to_cached();
+	local_irq_restore(flags);
 }
 
 void __init sh2a_cache_init(void)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 70fb906419dd..3ac4945cb493 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -48,44 +48,48 @@ static void sh4_flush_icache_range(void *args)
 	struct flusher_data *data = args;
 	int icacheaddr;
 	unsigned long start, end;
-	unsigned long v;
+	unsigned long flags, v;
 	int i;
 
 	start = data->addr1;
 	end = data->addr2;
 
 	/* If there are too many pages then just blow the caches */
 	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
 		local_flush_cache_all(args);
 	} else {
 		/* selectively flush d-cache then invalidate the i-cache */
 		/* this is inefficient, so only use for small ranges */
 		start &= ~(L1_CACHE_BYTES-1);
 		end += L1_CACHE_BYTES-1;
 		end &= ~(L1_CACHE_BYTES-1);
 
-		jump_to_uncached();
-
-		for (v = start; v < end; v+=L1_CACHE_BYTES) {
-			__ocbwb(v);
-
-			icacheaddr = CACHE_IC_ADDRESS_ARRAY |
-					(v & cpu_data->icache.entry_mask);
-
-			for (i = 0; i < cpu_data->icache.ways;
-				i++, icacheaddr += cpu_data->icache.way_incr)
-				/* Clear i-cache line valid-bit */
-				ctrl_outl(0, icacheaddr);
-		}
+		local_irq_save(flags);
+		jump_to_uncached();
+
+		for (v = start; v < end; v+=L1_CACHE_BYTES) {
+			asm volatile("ocbwb %0"
+				     : /* no output */
+				     : "m" (__m(v)));
+
+			icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
+					v & cpu_data->icache.entry_mask);
+
+			for (i = 0; i < cpu_data->icache.ways;
+					i++, icacheaddr += cpu_data->icache.way_incr)
+				/* Clear i-cache line valid-bit */
+				ctrl_outl(0, icacheaddr);
+		}
 
 		back_to_cached();
+		local_irq_restore(flags);
 	}
 }
 
 static inline void flush_cache_4096(unsigned long start,
 				    unsigned long phys)
 {
-	unsigned long exec_offset = 0;
+	unsigned long flags, exec_offset = 0;
 
 	/*
 	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
@@ -95,8 +99,10 @@ static inline void flush_cache_4096(unsigned long start,
 	    (start < CACHE_OC_ADDRESS_ARRAY))
 		exec_offset = 0x20000000;
 
+	local_irq_save(flags);
 	__flush_cache_4096(start | SH_CACHE_ASSOC,
 			   P1SEGADDR(phys), exec_offset);
+	local_irq_restore(flags);
 }
 
 /*
@@ -130,8 +136,9 @@ static void sh4_flush_dcache_page(void *arg)
 /* TODO: Selective icache invalidation through IC address array.. */
 static void __uses_jump_to_uncached flush_icache_all(void)
 {
-	unsigned long ccr;
+	unsigned long flags, ccr;
 
+	local_irq_save(flags);
 	jump_to_uncached();
 
 	/* Flush I-cache */
@@ -143,7 +150,9 @@ static void __uses_jump_to_uncached flush_icache_all(void)
 	 * back_to_cached() will take care of the barrier for us, don't add
 	 * another one!
 	 */
+
 	back_to_cached();
+	local_irq_restore(flags);
 }
 
 static inline void flush_dcache_all(void)
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 2f9dd6df00a6..467ff8e260f7 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -34,22 +34,28 @@ static inline void
 sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
 			   unsigned long paddr)
 {
+	local_irq_disable();
 	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
 }
 
 static inline void sh64_teardown_dtlb_cache_slot(void)
 {
 	sh64_teardown_tlb_slot(dtlb_cache_slot);
+	local_irq_enable();
 }
 
 static inline void sh64_icache_inv_all(void)
 {
 	unsigned long long addr, flag, data;
+	unsigned long flags;
 
 	addr = ICCR0;
 	flag = ICCR0_ICI;
 	data = 0;
 
+	/* Make this a critical section for safety (probably not strictly necessary.) */
+	local_irq_save(flags);
+
 	/* Without %1 it gets unexplicably wrong */
 	__asm__ __volatile__ (
 		"getcfg %3, 0, %0\n\t"
@@ -58,6 +64,8 @@ static inline void sh64_icache_inv_all(void)
58 "synci" 64 "synci"
59 : "=&r" (data) 65 : "=&r" (data)
60 : "0" (data), "r" (flag), "r" (addr)); 66 : "0" (data), "r" (flag), "r" (addr));
67
68 local_irq_restore(flags);
61} 69}
62 70
63static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end) 71static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
@@ -82,6 +90,7 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 	   Also, eaddr is page-aligned. */
 	unsigned int cpu = smp_processor_id();
 	unsigned long long addr, end_addr;
+	unsigned long flags = 0;
 	unsigned long running_asid, vma_asid;
 	addr = eaddr;
 	end_addr = addr + PAGE_SIZE;
@@ -102,9 +111,10 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 
 	running_asid = get_asid();
 	vma_asid = cpu_asid(cpu, vma->vm_mm);
-	if (running_asid != vma_asid)
+	if (running_asid != vma_asid) {
+		local_irq_save(flags);
 		switch_and_save_asid(vma_asid);
-
+	}
 	while (addr < end_addr) {
 		/* Worth unrolling a little */
 		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
@@ -113,9 +123,10 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
 		addr += 128;
 	}
-
-	if (running_asid != vma_asid)
+	if (running_asid != vma_asid) {
 		switch_and_save_asid(running_asid);
+		local_irq_restore(flags);
+	}
 }
 
 static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
@@ -148,12 +159,16 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 		unsigned long eaddr;
 		unsigned long after_last_page_start;
 		unsigned long mm_asid, current_asid;
+		unsigned long flags = 0;
 
 		mm_asid = cpu_asid(smp_processor_id(), mm);
 		current_asid = get_asid();
 
-		if (mm_asid != current_asid)
+		if (mm_asid != current_asid) {
+			/* Switch ASID and run the invalidate loop under cli */
+			local_irq_save(flags);
 			switch_and_save_asid(mm_asid);
+		}
 
 		aligned_start = start & PAGE_MASK;
 		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
@@ -179,8 +194,10 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 			aligned_start = vma->vm_end; /* Skip to start of next region */
 		}
 
-		if (mm_asid != current_asid)
+		if (mm_asid != current_asid) {
 			switch_and_save_asid(current_asid);
+			local_irq_restore(flags);
+		}
 	}
 }
 
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 9dc38660e3de..6293f57fa888 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -81,6 +81,7 @@ static void sh7705_flush_icache_range(void *args)
 static void __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
+	unsigned long flags;
 
 	phys |= SH_CACHE_VALID;
 
@@ -97,6 +98,7 @@ static void __flush_dcache_page(unsigned long phys)
 	 * potential cache aliasing, therefore the optimisation is probably not
 	 * possible.
 	 */
+	local_irq_save(flags);
 	jump_to_uncached();
 
 	ways = current_cpu_data.dcache.ways;
@@ -124,6 +126,7 @@ static void __flush_dcache_page(unsigned long phys)
 	} while (--ways);
 
 	back_to_cached();
+	local_irq_restore(flags);
 }
 
 /*
@@ -142,9 +145,14 @@ static void sh7705_flush_dcache_page(void *page)
 
 static void sh7705_flush_cache_all(void *args)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	jump_to_uncached();
+
 	cache_wback_all();
 	back_to_cached();
+	local_irq_restore(flags);
 }
 
 /*