Diffstat (limited to 'arch/sh/mm/cache-sh5.c')
-rw-r--r--	arch/sh/mm/cache-sh5.c | 29 +++++++++++++++++++++++------
1 file changed, 23 insertions(+), 6 deletions(-)
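The patch below brackets the SH-5 cache flush paths with interrupt disabling. Two patterns recur: the wired DTLB slot borrowed for cache operations is held with IRQs off for its whole lifetime, and the icache invalidators take an IRQ-off critical section only when they must temporarily switch to another mm's ASID. A minimal sketch of the first pattern, using the identifiers from the patched file (the work between setup and teardown is elided here):

	/* The wired DTLB slot is shared state: keep IRQs off so no
	 * interrupt handler can observe or reuse the temporary mapping
	 * between setup and teardown. */
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
	/* ... cache operations through the temporary mapping ... */
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();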
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 2f9dd6df00a6..467ff8e260f7 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -34,22 +34,28 @@ static inline void
 sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
 			   unsigned long paddr)
 {
+	local_irq_disable();
 	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
 }
 
 static inline void sh64_teardown_dtlb_cache_slot(void)
 {
 	sh64_teardown_tlb_slot(dtlb_cache_slot);
+	local_irq_enable();
 }
 
 static inline void sh64_icache_inv_all(void)
 {
 	unsigned long long addr, flag, data;
+	unsigned long flags;
 
 	addr = ICCR0;
 	flag = ICCR0_ICI;
 	data = 0;
 
+	/* Make this a critical section for safety (probably not strictly necessary.) */
+	local_irq_save(flags);
+
 	/* Without %1 it gets unexplicably wrong */
 	__asm__ __volatile__ (
 		"getcfg	%3, 0, %0\n\t"
@@ -58,6 +64,8 @@ static inline void sh64_icache_inv_all(void)
 		"synci"
 		: "=&r" (data)
 		: "0" (data), "r" (flag), "r" (addr));
+
+	local_irq_restore(flags);
 }
 
 static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
@@ -82,6 +90,7 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 	   Also, eaddr is page-aligned. */
 	unsigned int cpu = smp_processor_id();
 	unsigned long long addr, end_addr;
+	unsigned long flags = 0;
 	unsigned long running_asid, vma_asid;
 	addr = eaddr;
 	end_addr = addr + PAGE_SIZE;
@@ -102,9 +111,10 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 
 	running_asid = get_asid();
 	vma_asid = cpu_asid(cpu, vma->vm_mm);
-	if (running_asid != vma_asid)
+	if (running_asid != vma_asid) {
+		local_irq_save(flags);
 		switch_and_save_asid(vma_asid);
-
+	}
 	while (addr < end_addr) {
 		/* Worth unrolling a little */
 		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
@@ -113,9 +123,10 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
 		addr += 128;
 	}
-
-	if (running_asid != vma_asid)
+	if (running_asid != vma_asid) {
 		switch_and_save_asid(running_asid);
+		local_irq_restore(flags);
+	}
 }
 
 static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
@@ -148,12 +159,16 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 		unsigned long eaddr;
 		unsigned long after_last_page_start;
 		unsigned long mm_asid, current_asid;
+		unsigned long flags = 0;
 
 		mm_asid = cpu_asid(smp_processor_id(), mm);
 		current_asid = get_asid();
 
-		if (mm_asid != current_asid)
+		if (mm_asid != current_asid) {
+			/* Switch ASID and run the invalidate loop under cli */
+			local_irq_save(flags);
 			switch_and_save_asid(mm_asid);
+		}
 
 		aligned_start = start & PAGE_MASK;
 		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
@@ -179,8 +194,10 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 			aligned_start = vma->vm_end; /* Skip to start of next region */
 		}
 
-		if (mm_asid != current_asid)
+		if (mm_asid != current_asid) {
 			switch_and_save_asid(current_asid);
+			local_irq_restore(flags);
+		}
 	}
 }
 
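For the second pattern, a condensed sketch of the conditional critical section used by sh64_icache_inv_user_page() and sh64_icache_inv_user_page_range(): flags starts at 0, and local_irq_save()/local_irq_restore() are guarded by the same ASID test, so the restore can only run if the save did. invalidate_loop() is a hypothetical placeholder for the icbi loop:

	unsigned long flags = 0;
	unsigned long mm_asid = cpu_asid(smp_processor_id(), mm);
	unsigned long current_asid = get_asid();

	if (mm_asid != current_asid) {
		/* Running under a borrowed ASID: no interrupt may run
		 * while the wrong address space is live. */
		local_irq_save(flags);
		switch_and_save_asid(mm_asid);
	}

	invalidate_loop();	/* hypothetical: the unrolled icbi loop */

	if (mm_asid != current_asid) {
		switch_and_save_asid(current_asid);
		local_irq_restore(flags);
	}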