author    Paul Mundt <lethal@linux-sh.org>  2009-08-21 05:21:07 -0400
committer Paul Mundt <lethal@linux-sh.org>  2009-08-21 05:21:07 -0400
commit    64a6d72213dd810dd55bd0a503c36150af41c3c3 (patch)
tree      81f2f6e66d3a38f5cb7a27f0a85b365b25469fe4 /arch/sh/mm/cache-sh5.c
parent    f26b2a562b46ab186c8383993ab1332673ac4a47 (diff)
sh: Kill off now redundant local irq disabling.
on_each_cpu() takes care of IRQ and preempt handling, so the localized
handling in each of the called functions can be killed off.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
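For context, a minimal sketch of why the per-callee masking is redundant, assuming the on_each_cpu() implementation in kernel/smp.c of roughly this era (simplified here; error paths and the UP variant are elided): the helper already disables preemption for the whole cross-call and runs the callback on the local CPU with interrupts masked, so the callees do not need their own local_irq_*() bracketing.

int on_each_cpu(void (*func)(void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	/* Run func on all other online CPUs via IPI. */
	ret = smp_call_function(func, info, wait);
	/* Run it on the local CPU with IRQs masked. */
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();

	return ret;
}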
Diffstat (limited to 'arch/sh/mm/cache-sh5.c')
-rw-r--r--	arch/sh/mm/cache-sh5.c	29
1 file changed, 6 insertions(+), 23 deletions(-)
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 467ff8e260f7..2f9dd6df00a6 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -34,28 +34,22 @@ static inline void
 sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
 			   unsigned long paddr)
 {
-	local_irq_disable();
 	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
 }
 
 static inline void sh64_teardown_dtlb_cache_slot(void)
 {
 	sh64_teardown_tlb_slot(dtlb_cache_slot);
-	local_irq_enable();
 }
 
 static inline void sh64_icache_inv_all(void)
 {
 	unsigned long long addr, flag, data;
-	unsigned long flags;
 
 	addr = ICCR0;
 	flag = ICCR0_ICI;
 	data = 0;
 
-	/* Make this a critical section for safety (probably not strictly necessary.) */
-	local_irq_save(flags);
-
 	/* Without %1 it gets unexplicably wrong */
 	__asm__ __volatile__ (
 		"getcfg	%3, 0, %0\n\t"
@@ -64,8 +58,6 @@ static inline void sh64_icache_inv_all(void)
64 "synci" 58 "synci"
65 : "=&r" (data) 59 : "=&r" (data)
66 : "0" (data), "r" (flag), "r" (addr)); 60 : "0" (data), "r" (flag), "r" (addr));
67
68 local_irq_restore(flags);
69} 61}
70 62
71static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end) 63static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
@@ -90,7 +82,6 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 	   Also, eaddr is page-aligned. */
 	unsigned int cpu = smp_processor_id();
 	unsigned long long addr, end_addr;
-	unsigned long flags = 0;
 	unsigned long running_asid, vma_asid;
 	addr = eaddr;
 	end_addr = addr + PAGE_SIZE;
@@ -111,10 +102,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 
 	running_asid = get_asid();
 	vma_asid = cpu_asid(cpu, vma->vm_mm);
-	if (running_asid != vma_asid) {
-		local_irq_save(flags);
+	if (running_asid != vma_asid)
 		switch_and_save_asid(vma_asid);
-	}
+
 	while (addr < end_addr) {
 		/* Worth unrolling a little */
 		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
@@ -123,10 +113,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
123 __asm__ __volatile__("icbi %0, 96" : : "r" (addr)); 113 __asm__ __volatile__("icbi %0, 96" : : "r" (addr));
124 addr += 128; 114 addr += 128;
125 } 115 }
126 if (running_asid != vma_asid) { 116
117 if (running_asid != vma_asid)
127 switch_and_save_asid(running_asid); 118 switch_and_save_asid(running_asid);
128 local_irq_restore(flags);
129 }
130} 119}
131 120
132static void sh64_icache_inv_user_page_range(struct mm_struct *mm, 121static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
@@ -159,16 +148,12 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 		unsigned long eaddr;
 		unsigned long after_last_page_start;
 		unsigned long mm_asid, current_asid;
-		unsigned long flags = 0;
 
 		mm_asid = cpu_asid(smp_processor_id(), mm);
 		current_asid = get_asid();
 
-		if (mm_asid != current_asid) {
-			/* Switch ASID and run the invalidate loop under cli */
-			local_irq_save(flags);
+		if (mm_asid != current_asid)
 			switch_and_save_asid(mm_asid);
-		}
 
 		aligned_start = start & PAGE_MASK;
 		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
@@ -194,10 +179,8 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 			aligned_start = vma->vm_end; /* Skip to start of next region */
 		}
 
-		if (mm_asid != current_asid) {
+		if (mm_asid != current_asid)
 			switch_and_save_asid(current_asid);
-			local_irq_restore(flags);
-		}
 	}
 }
 