author		Haavard Skinnemoen <haavard.skinnemoen@atmel.com>	2007-05-15 09:06:41 -0400
committer	Haavard Skinnemoen <haavard.skinnemoen@atmel.com>	2008-07-02 05:01:28 -0400
commit		b13d618b44fefea7529bd467e55423d353a599fc (patch)
tree		e5e7d404d0c8711087e00d12be6931a8982897f5 /arch/avr32/mm/tlb.c
parent		d7ff2a4a28ceadc03df2f5a20897165fda306382 (diff)
avr32: Clean up and optimize the TLB operations
This and the following patches aim to optimize the code dealing with
page tables and TLB operations. Each patch reduces the time it takes
to gzip a 16 MB file slightly, but I expect things like fork() and
mmap() will improve somewhat more.

This patch deals with the low-level TLB operations:

  * Remove unused _TLBEHI_I define
  * Use gcc builtins instead of inline assembly
  * Remove a few unnecessary pipeline flushes and nops
  * Introduce NR_TLB_ENTRIES define and use it instead of hardcoding
    it to 32 in a few places throughout the code
  * Use sysreg bitops instead of hardcoded shifts and masks
  * Make a few needlessly global functions static

Signed-off-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
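A rough illustration of the sysreg bitops mentioned above. The exact
definitions live in asm/sysreg.h; the sketch below is an assumption
based on the shifts this patch removes (e.g. the DRP field of MMUCR
at bit 14), not the verbatim kernel macros:

    /* Hypothetical shapes for the sysreg bitfield helpers, assuming
     * per-field SYSREG_<NAME>_OFFSET / SYSREG_<NAME>_SIZE constants. */
    #define SYSREG_BF(name, value)                                  \
            (((value) & ((1UL << SYSREG_##name##_SIZE) - 1))        \
             << SYSREG_##name##_OFFSET)
    #define SYSREG_BFEXT(name, value)                               \
            (((value) >> SYSREG_##name##_OFFSET)                    \
             & ((1UL << SYSREG_##name##_SIZE) - 1))
    #define SYSREG_BFINS(name, value, old)                          \
            (((old) & ~(((1UL << SYSREG_##name##_SIZE) - 1)         \
                        << SYSREG_##name##_OFFSET))                 \
             | SYSREG_BF(name, value))
    #define SYSREG_BIT(name)    (1UL << SYSREG_##name##_OFFSET)

With SYSREG_DRP_OFFSET == 14 (implied by the removed "index << 14"),
"SYSREG_BFINS(DRP, index, mmucr_save)" replaces the open-coded
mask-and-shift pair, placing index in the DRP field without hardcoding
either the mask or the shift.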
Diffstat (limited to 'arch/avr32/mm/tlb.c')
-rw-r--r--	arch/avr32/mm/tlb.c	175
1 file changed, 87 insertions(+), 88 deletions(-)
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c
index cd12edbea9f2..06677be98ffb 100644
--- a/arch/avr32/mm/tlb.c
+++ b/arch/avr32/mm/tlb.c
@@ -11,21 +11,21 @@
 
 #include <asm/mmu_context.h>
 
-#define _TLBEHI_I	0x100
+/* TODO: Get the correct number from the CONFIG1 system register */
+#define NR_TLB_ENTRIES	32
 
-void show_dtlb_entry(unsigned int index)
+static void show_dtlb_entry(unsigned int index)
 {
-	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
+	u32 tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
 	unsigned long flags;
 
 	local_irq_save(flags);
 	mmucr_save = sysreg_read(MMUCR);
 	tlbehi_save = sysreg_read(TLBEHI);
-	mmucr = mmucr_save & 0x13;
-	mmucr |= index << 14;
+	mmucr = SYSREG_BFINS(DRP, index, mmucr_save);
 	sysreg_write(MMUCR, mmucr);
 
-	asm volatile("tlbr" : : : "memory");
+	__builtin_tlbr();
 	cpu_sync_pipeline();
 
 	tlbehi = sysreg_read(TLBEHI);
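The __builtin_tlbr/tlbs/tlbw intrinsics used throughout this patch
each emit a single TLB instruction. Judging purely by the lines they
replace in this diff, they stand in for inline assembly of this shape
(an assumption about the constraints; the removed tlbs/tlbw lines did
not all carry a "memory" clobber, and the builtins are defined by the
avr32 gcc port, not shown here):

    asm volatile("tlbr" : : : "memory");    /* __builtin_tlbr() */
    asm volatile("tlbs" : : : "memory");    /* __builtin_tlbs() */
    asm volatile("tlbw" : : : "memory");    /* __builtin_tlbw() */

Besides being tidier, builtins give the compiler more visibility than
an opaque asm block.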
@@ -33,15 +33,17 @@ void show_dtlb_entry(unsigned int index)
 
 	printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
 	       index,
-	       (tlbehi & 0x200)?'1':'0',
-	       (tlbelo & 0x100)?'1':'0',
-	       (tlbehi & 0xff),
-	       (tlbehi >> 12), (tlbelo >> 12),
-	       (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
-	       (tlbelo & 0x200)?'1':'0',
-	       (tlbelo & 0x080)?'1':'0',
-	       (tlbelo & 0x001)?'1':'0',
-	       (tlbelo & 0x002)?'1':'0');
+	       SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
+	       SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
+	       SYSREG_BFEXT(ASID, tlbehi),
+	       SYSREG_BFEXT(VPN, tlbehi) >> 2,
+	       SYSREG_BFEXT(PFN, tlbelo) >> 2,
+	       SYSREG_BFEXT(AP, tlbelo),
+	       SYSREG_BFEXT(SZ, tlbelo),
+	       SYSREG_BFEXT(TLBELO_C, tlbelo) ? 'C' : ' ',
+	       SYSREG_BFEXT(B, tlbelo) ? 'B' : ' ',
+	       SYSREG_BFEXT(W, tlbelo) ? 'W' : ' ',
+	       SYSREG_BFEXT(TLBELO_D, tlbelo) ? 'D' : ' ');
 
 	sysreg_write(MMUCR, mmucr_save);
 	sysreg_write(TLBEHI, tlbehi_save);
@@ -54,29 +56,33 @@ void dump_dtlb(void)
 	unsigned int i;
 
 	printk("ID V G ASID VPN PFN AP SZ C B W D\n");
-	for (i = 0; i < 32; i++)
+	for (i = 0; i < NR_TLB_ENTRIES; i++)
 		show_dtlb_entry(i);
 }
 
-static unsigned long last_mmucr;
-
-static inline void set_replacement_pointer(unsigned shift)
+static void update_dtlb(unsigned long address, pte_t pte)
 {
-	unsigned long mmucr, mmucr_save;
+	u32 tlbehi;
+	u32 mmucr;
 
-	mmucr = mmucr_save = sysreg_read(MMUCR);
+	/*
+	 * We're not changing the ASID here, so no need to flush the
+	 * pipeline.
+	 */
+	tlbehi = sysreg_read(TLBEHI);
+	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
+	tlbehi |= address & MMU_VPN_MASK;
+	tlbehi |= SYSREG_BIT(TLBEHI_V);
+	sysreg_write(TLBEHI, tlbehi);
 
 	/* Does this mapping already exist? */
-	__asm__ __volatile__(
-		"	tlbs\n"
-		"	mfsr %0, %1"
-		: "=r"(mmucr)
-		: "i"(SYSREG_MMUCR));
+	__builtin_tlbs();
+	mmucr = sysreg_read(MMUCR);
 
 	if (mmucr & SYSREG_BIT(MMUCR_N)) {
 		/* Not found -- pick a not-recently-accessed entry */
-		unsigned long rp;
-		unsigned long tlbar = sysreg_read(TLBARLO);
+		unsigned int rp;
+		u32 tlbar = sysreg_read(TLBARLO);
 
 		rp = 32 - fls(tlbar);
 		if (rp == 32) {
@@ -84,30 +90,14 @@ static inline void set_replacement_pointer(unsigned shift)
 			sysreg_write(TLBARLO, -1L);
 		}
 
-		mmucr &= 0x13;
-		mmucr |= (rp << shift);
-
+		mmucr = SYSREG_BFINS(DRP, rp, mmucr);
 		sysreg_write(MMUCR, mmucr);
 	}
 
-	last_mmucr = mmucr;
-}
-
-static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
-{
-	unsigned long vpn;
-
-	vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
-	sysreg_write(TLBEHI, vpn);
-	cpu_sync_pipeline();
-
-	set_replacement_pointer(14);
-
 	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);
 
 	/* Let's go */
-	asm volatile("nop\n\ttlbw" : : : "memory");
-	cpu_sync_pipeline();
+	__builtin_tlbw();
 }
 
 void update_mmu_cache(struct vm_area_struct *vma,
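How the not-recently-accessed pick works: TLBARLO keeps one flag per
entry, with entry n mapped to bit (31 - n), as the
"0x80000000UL >> entry" in __flush_tlb_page() below implies. fls()
returns the 1-based position of the highest set bit, so a worked
example looks like this:

    u32 tlbar = 0x00010000;           /* bit 16 set: entry 15 not accessed */
    unsigned int rp = 32 - fls(tlbar);  /* fls() == 17, so rp == 15 */

    /* tlbar == 0 means every entry was recently accessed: fls(0) == 0
     * gives rp == 32, and the branch above resets TLBARLO to all-ones
     * so every entry becomes a candidate again. */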
@@ -120,39 +110,40 @@ void update_mmu_cache(struct vm_area_struct *vma,
 		return;
 
 	local_irq_save(flags);
-	update_dtlb(address, pte, get_asid());
+	update_dtlb(address, pte);
 	local_irq_restore(flags);
 }
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+static void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
-	unsigned long mmucr, tlbehi;
+	u32 mmucr, tlbehi;
 
-	page |= asid;
-	sysreg_write(TLBEHI, page);
-	cpu_sync_pipeline();
-	asm volatile("tlbs");
+	/*
+	 * Caller is responsible for masking out non-PFN bits in page
+	 * and changing the current ASID if necessary. This means that
+	 * we don't need to flush the pipeline after writing TLBEHI.
+	 */
+	tlbehi = page | asid;
+	sysreg_write(TLBEHI, tlbehi);
+
+	__builtin_tlbs();
 	mmucr = sysreg_read(MMUCR);
 
 	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
-		unsigned long tlbarlo;
-		unsigned long entry;
+		unsigned int entry;
+		u32 tlbarlo;
 
 		/* Clear the "valid" bit */
-		tlbehi = sysreg_read(TLBEHI);
-		tlbehi &= ~_TLBEHI_VALID;
 		sysreg_write(TLBEHI, tlbehi);
-		cpu_sync_pipeline();
 
 		/* mark the entry as "not accessed" */
-		entry = (mmucr >> 14) & 0x3f;
+		entry = SYSREG_BFEXT(DRP, mmucr);
 		tlbarlo = sysreg_read(TLBARLO);
-		tlbarlo |= (0x80000000 >> entry);
+		tlbarlo |= (0x80000000UL >> entry);
 		sysreg_write(TLBARLO, tlbarlo);
 
 		/* update the entry with valid bit clear */
-		asm volatile("tlbw");
-		cpu_sync_pipeline();
+		__builtin_tlbw();
 	}
 }
 
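The burden the new comment places on callers is already met by both
call sites in this file. Pieced together from the flush_tlb_range()
hunk below (a sketch of the contract, not new code):

    /* Caller contract for __flush_tlb_page(): page-aligned address,
     * matching ASID, interrupts off. */
    start &= PAGE_MASK;                 /* mask out non-VPN bits */
    if (mm != current->mm) {
            saved_asid = get_asid();
            set_asid(asid);             /* make current ASID match */
    }
    __flush_tlb_page(asid, start);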
@@ -190,17 +181,22 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
 	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
 		mm->context = NO_CONTEXT;
 		if (mm == current->mm)
 			activate_context(mm);
 	} else {
-		unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = MMU_NO_ASID;
+		unsigned long asid;
+		unsigned long saved_asid;
+
+		asid = mm->context & MMU_CONTEXT_ASID_MASK;
+		saved_asid = MMU_NO_ASID;
 
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
+
 		if (mm != current->mm) {
 			saved_asid = get_asid();
 			set_asid(asid);
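The "too many entries" heuristic rounds the range up to whole pages
and bails out to a full context invalidation past a quarter of the
DTLB. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and
MMU_DTLB_ENTRIES == 32 (the value NR_TLB_ENTRIES hardcodes above):

    start = 0x1000;
    end   = 0x9001;                     /* 8 pages + 1 byte */
    size  = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;  /* == 9 */
    /* 9 > 32 / 4, so the whole context is dropped rather than
     * flushing nine pages one TLB probe at a time. */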
@@ -218,33 +214,34 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 }
 
 /*
- * TODO: If this is only called for addresses > TASK_SIZE, we can probably
- * skip the ASID stuff and just use the Global bit...
+ * This function depends on the pages to be flushed having the G
+ * (global) bit set in their pte. This is true for all
+ * PAGE_KERNEL(_RO) pages.
 */
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long flags;
 	int size;
 
-	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
 		flush_tlb_all();
 	} else {
-		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
+		unsigned long asid;
+
+		local_irq_save(flags);
+		asid = get_asid();
 
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
-		set_asid(asid);
+
 		while (start < end) {
 			__flush_tlb_page(asid, start);
 			start += PAGE_SIZE;
 		}
-		set_asid(saved_asid);
+		local_irq_restore(flags);
 	}
-	local_irq_restore(flags);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
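Two things change in flush_tlb_kernel_range() besides the builtins.
First, the set_asid()/saved_asid dance is gone: per the new comment,
the pages carry the G bit, so the TLBS probe hits them under whatever
ASID happens to be current and get_asid() is enough. Second, the
irqs-off window shrinks from the whole function to just the loop that
programs TLBEHI/MMUCR:

    local_irq_save(flags);
    asid = get_asid();          /* any ASID works: the pages are global */
    while (start < end) {
            __flush_tlb_page(asid, start);
            start += PAGE_SIZE;
    }
    local_irq_restore(flags);

The flush_tlb_all() path and the size computation now run with
interrupts enabled.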
@@ -280,7 +277,7 @@ static void *tlb_start(struct seq_file *tlb, loff_t *pos)
 {
 	static unsigned long tlb_index;
 
-	if (*pos >= 32)
+	if (*pos >= NR_TLB_ENTRIES)
 		return NULL;
 
 	tlb_index = 0;
@@ -291,7 +288,7 @@ static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
 {
 	unsigned long *index = v;
 
-	if (*index >= 31)
+	if (*index >= NR_TLB_ENTRIES - 1)
 		return NULL;
 
 	++*pos;
@@ -313,16 +310,16 @@ static int tlb_show(struct seq_file *tlb, void *v)
 	if (*index == 0)
 		seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n");
 
-	BUG_ON(*index >= 32);
+	BUG_ON(*index >= NR_TLB_ENTRIES);
 
 	local_irq_save(flags);
 	mmucr_save = sysreg_read(MMUCR);
 	tlbehi_save = sysreg_read(TLBEHI);
-	mmucr = mmucr_save & 0x13;
-	mmucr |= *index << 14;
+	mmucr = SYSREG_BFINS(DRP, *index, mmucr_save);
 	sysreg_write(MMUCR, mmucr);
 
-	asm volatile("tlbr" : : : "memory");
+	/* TLBR might change the ASID */
+	__builtin_tlbr();
 	cpu_sync_pipeline();
 
 	tlbehi = sysreg_read(TLBEHI);
@@ -334,16 +331,18 @@ static int tlb_show(struct seq_file *tlb, void *v)
 	local_irq_restore(flags);
 
 	seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
 		   *index,
-		   (tlbehi & 0x200)?'1':'0',
-		   (tlbelo & 0x100)?'1':'0',
-		   (tlbehi & 0xff),
-		   (tlbehi >> 12), (tlbelo >> 12),
-		   (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
-		   (tlbelo & 0x200)?'1':'0',
-		   (tlbelo & 0x080)?'1':'0',
-		   (tlbelo & 0x001)?'1':'0',
-		   (tlbelo & 0x002)?'1':'0');
+		   SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
+		   SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(ASID, tlbehi),
+		   SYSREG_BFEXT(VPN, tlbehi) >> 2,
+		   SYSREG_BFEXT(PFN, tlbelo) >> 2,
+		   SYSREG_BFEXT(AP, tlbelo),
+		   SYSREG_BFEXT(SZ, tlbelo),
+		   SYSREG_BFEXT(TLBELO_C, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(B, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(W, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(TLBELO_D, tlbelo) ? '1' : '0');
 
 	return 0;
 }