author     Paul Mundt <lethal@linux-sh.org>          2009-08-21 05:21:07 -0400
committer  Paul Mundt <lethal@linux-sh.org>          2009-08-21 05:21:07 -0400
commit     64a6d72213dd810dd55bd0a503c36150af41c3c3
tree       81f2f6e66d3a38f5cb7a27f0a85b365b25469fe4  /arch/sh/mm
parent     f26b2a562b46ab186c8383993ab1332673ac4a47
sh: Kill off now redundant local irq disabling.
on_each_cpu() takes care of IRQ and preempt handling, so the localized
handling in each of the called functions can be killed off.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
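
For reference, the flush routines touched below are invoked through on_each_cpu(), which already disables preemption around the whole operation and disables interrupts around the local invocation, which is why the per-function local_irq_save()/restore() pairs are redundant. A minimal sketch of that wrapper, paraphrasing the kernel/smp.c of this era rather than quoting it verbatim:

/*
 * Sketch only: illustrates why the callees below need no IRQ fiddling
 * of their own.  on_each_cpu() runs func on the other CPUs via IPI
 * (so in hardirq context there) and brackets the local call with
 * local_irq_disable()/enable(), all under preempt_disable().
 */
int on_each_cpu(void (*func)(void *info), void *info, int wait)
{
	int ret;

	preempt_disable();
	/* Run func on every other online CPU. */
	ret = smp_call_function(func, info, wait);

	/* Run func on the local CPU with interrupts off. */
	local_irq_disable();
	func(info);
	local_irq_enable();

	preempt_enable();
	return ret;
}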
Diffstat (limited to 'arch/sh/mm')
 arch/sh/mm/cache-sh2a.c   |  6
 arch/sh/mm/cache-sh4.c    | 61
 arch/sh/mm/cache-sh5.c    | 29
 arch/sh/mm/cache-sh7705.c |  8
 4 files changed, 33 insertions(+), 71 deletions(-)
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 975899d83564..d783361e3f0a 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -102,12 +102,10 @@ static void sh2a_flush_icache_range(void *args)
 	struct flusher_data *data = args;
 	unsigned long start, end;
 	unsigned long v;
-	unsigned long flags;
 
 	start = data->addr1 & ~(L1_CACHE_BYTES-1);
 	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
 
-	local_irq_save(flags);
 	jump_to_uncached();
 
 	for (v = start; v < end; v+=L1_CACHE_BYTES) {
@@ -122,12 +120,10 @@ static void sh2a_flush_icache_range(void *args)
 			}
 		}
 		/* I-Cache invalidate */
-		ctrl_outl(addr,
-			  CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
+		ctrl_outl(addr, CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
 	}
 
 	back_to_cached();
-	local_irq_restore(flags);
 }
 
 void __init sh2a_cache_init(void)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 9201b37c7cca..e3b77f0fa470 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -48,48 +48,44 @@ static void sh4_flush_icache_range(void *args)
 	struct flusher_data *data = args;
 	int icacheaddr;
 	unsigned long start, end;
-	unsigned long flags, v;
+	unsigned long v;
 	int i;
 
 	start = data->addr1;
 	end = data->addr2;
 
 	/* If there are too many pages then just blow the caches */
 	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
 		local_flush_cache_all(args);
 	} else {
 		/* selectively flush d-cache then invalidate the i-cache */
 		/* this is inefficient, so only use for small ranges */
 		start &= ~(L1_CACHE_BYTES-1);
 		end += L1_CACHE_BYTES-1;
 		end &= ~(L1_CACHE_BYTES-1);
 
-		local_irq_save(flags);
-		jump_to_uncached();
-
-		for (v = start; v < end; v+=L1_CACHE_BYTES) {
-			asm volatile("ocbwb %0"
-				     : /* no output */
-				     : "m" (__m(v)));
+		jump_to_uncached();
+
+		for (v = start; v < end; v+=L1_CACHE_BYTES) {
+			__ocbwb(v);
 
-			icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
-					v & cpu_data->icache.entry_mask);
-
-			for (i = 0; i < cpu_data->icache.ways;
-				i++, icacheaddr += cpu_data->icache.way_incr)
-					/* Clear i-cache line valid-bit */
-					ctrl_outl(0, icacheaddr);
-		}
+			icacheaddr = CACHE_IC_ADDRESS_ARRAY |
+					(v & cpu_data->icache.entry_mask);
+
+			for (i = 0; i < cpu_data->icache.ways;
+			     i++, icacheaddr += cpu_data->icache.way_incr)
+				/* Clear i-cache line valid-bit */
+				ctrl_outl(0, icacheaddr);
+		}
 
 		back_to_cached();
-		local_irq_restore(flags);
 	}
 }
 
 static inline void flush_cache_4096(unsigned long start,
 				    unsigned long phys)
 {
-	unsigned long flags, exec_offset = 0;
+	unsigned long exec_offset = 0;
 
 	/*
 	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
@@ -99,10 +95,8 @@ static inline void flush_cache_4096(unsigned long start,
 	    (start < CACHE_OC_ADDRESS_ARRAY))
 		exec_offset = 0x20000000;
 
-	local_irq_save(flags);
 	__flush_cache_4096(start | SH_CACHE_ASSOC,
 			   P1SEGADDR(phys), exec_offset);
-	local_irq_restore(flags);
 }
 
 /*
@@ -135,9 +129,8 @@ static void sh4_flush_dcache_page(void *page)
 /* TODO: Selective icache invalidation through IC address array.. */
 static void __uses_jump_to_uncached flush_icache_all(void)
 {
-	unsigned long flags, ccr;
+	unsigned long ccr;
 
-	local_irq_save(flags);
 	jump_to_uncached();
 
 	/* Flush I-cache */
@@ -149,9 +142,7 @@ static void __uses_jump_to_uncached flush_icache_all(void)
 	 * back_to_cached() will take care of the barrier for us, don't add
 	 * another one!
 	 */
-
 	back_to_cached();
-	local_irq_restore(flags);
 }
 
 static inline void flush_dcache_all(void)
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 467ff8e260f7..2f9dd6df00a6 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -34,28 +34,22 @@ static inline void
 sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
 			   unsigned long paddr)
 {
-	local_irq_disable();
 	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
 }
 
 static inline void sh64_teardown_dtlb_cache_slot(void)
 {
 	sh64_teardown_tlb_slot(dtlb_cache_slot);
-	local_irq_enable();
 }
 
 static inline void sh64_icache_inv_all(void)
 {
 	unsigned long long addr, flag, data;
-	unsigned long flags;
 
 	addr = ICCR0;
 	flag = ICCR0_ICI;
 	data = 0;
 
-	/* Make this a critical section for safety (probably not strictly necessary.) */
-	local_irq_save(flags);
-
 	/* Without %1 it gets unexplicably wrong */
 	__asm__ __volatile__ (
 		"getcfg %3, 0, %0\n\t"
@@ -64,8 +58,6 @@ static inline void sh64_icache_inv_all(void)
 		"synci"
 		: "=&r" (data)
 		: "0" (data), "r" (flag), "r" (addr));
-
-	local_irq_restore(flags);
 }
 
 static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
@@ -90,7 +82,6 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 	   Also, eaddr is page-aligned. */
 	unsigned int cpu = smp_processor_id();
 	unsigned long long addr, end_addr;
-	unsigned long flags = 0;
 	unsigned long running_asid, vma_asid;
 	addr = eaddr;
 	end_addr = addr + PAGE_SIZE;
@@ -111,10 +102,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 
 	running_asid = get_asid();
 	vma_asid = cpu_asid(cpu, vma->vm_mm);
-	if (running_asid != vma_asid) {
-		local_irq_save(flags);
+	if (running_asid != vma_asid)
 		switch_and_save_asid(vma_asid);
-	}
+
 	while (addr < end_addr) {
 		/* Worth unrolling a little */
 		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
@@ -123,10 +113,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
 		addr += 128;
 	}
-	if (running_asid != vma_asid) {
+
+	if (running_asid != vma_asid)
 		switch_and_save_asid(running_asid);
-		local_irq_restore(flags);
-	}
 }
 
 static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
@@ -159,16 +148,12 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 		unsigned long eaddr;
 		unsigned long after_last_page_start;
 		unsigned long mm_asid, current_asid;
-		unsigned long flags = 0;
 
 		mm_asid = cpu_asid(smp_processor_id(), mm);
 		current_asid = get_asid();
 
-		if (mm_asid != current_asid) {
-			/* Switch ASID and run the invalidate loop under cli */
-			local_irq_save(flags);
+		if (mm_asid != current_asid)
 			switch_and_save_asid(mm_asid);
-		}
 
 		aligned_start = start & PAGE_MASK;
 		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
@@ -194,10 +179,8 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 			aligned_start = vma->vm_end; /* Skip to start of next region */
 		}
 
-		if (mm_asid != current_asid) {
+		if (mm_asid != current_asid)
 			switch_and_save_asid(current_asid);
-			local_irq_restore(flags);
-		}
 	}
 }
 
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 6293f57fa888..9dc38660e3de 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -81,7 +81,6 @@ static void sh7705_flush_icache_range(void *args)
 static void __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
-	unsigned long flags;
 
 	phys |= SH_CACHE_VALID;
 
@@ -98,7 +97,6 @@ static void __flush_dcache_page(unsigned long phys)
 	 * potential cache aliasing, therefore the optimisation is probably not
 	 * possible.
 	 */
-	local_irq_save(flags);
 	jump_to_uncached();
 
 	ways = current_cpu_data.dcache.ways;
@@ -126,7 +124,6 @@ static void __flush_dcache_page(unsigned long phys)
 	} while (--ways);
 
 	back_to_cached();
-	local_irq_restore(flags);
 }
 
 /*
@@ -145,14 +142,9 @@ static void sh7705_flush_dcache_page(void *page)
 
 static void sh7705_flush_cache_all(void *args)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	jump_to_uncached();
-
 	cache_wback_all();
 	back_to_cached();
-	local_irq_restore(flags);
 }
 
 /*