diff options
Diffstat (limited to 'arch/mips/mm/tlb-r4k.c')
| -rw-r--r-- | arch/mips/mm/tlb-r4k.c | 85 |
1 file changed, 68 insertions, 17 deletions
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index a865f2394cb0..9dca099ba16b 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
| @@ -32,13 +32,35 @@ extern void build_tlb_refill_handler(void); | |||
| 32 | "nop; nop; nop; nop; nop; nop;\n\t" \ | 32 | "nop; nop; nop; nop; nop; nop;\n\t" \ |
| 33 | ".set reorder\n\t") | 33 | ".set reorder\n\t") |
| 34 | 34 | ||
| 35 | /* Atomicity and interruptability */ | ||
| 36 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 37 | |||
| 38 | #include <asm/smtc.h> | ||
| 39 | #include <asm/mipsmtregs.h> | ||
| 40 | |||
| 41 | #define ENTER_CRITICAL(flags) \ | ||
| 42 | { \ | ||
| 43 | unsigned int mvpflags; \ | ||
| 44 | local_irq_save(flags);\ | ||
| 45 | mvpflags = dvpe() | ||
| 46 | #define EXIT_CRITICAL(flags) \ | ||
| 47 | evpe(mvpflags); \ | ||
| 48 | local_irq_restore(flags); \ | ||
| 49 | } | ||
| 50 | #else | ||
| 51 | |||
| 52 | #define ENTER_CRITICAL(flags) local_irq_save(flags) | ||
| 53 | #define EXIT_CRITICAL(flags) local_irq_restore(flags) | ||
| 54 | |||
| 55 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 56 | |||
| 35 | void local_flush_tlb_all(void) | 57 | void local_flush_tlb_all(void) |
| 36 | { | 58 | { |
| 37 | unsigned long flags; | 59 | unsigned long flags; |
| 38 | unsigned long old_ctx; | 60 | unsigned long old_ctx; |
| 39 | int entry; | 61 | int entry; |
| 40 | 62 | ||
| 41 | local_irq_save(flags); | 63 | ENTER_CRITICAL(flags); |
| 42 | /* Save old context and create impossible VPN2 value */ | 64 | /* Save old context and create impossible VPN2 value */ |
| 43 | old_ctx = read_c0_entryhi(); | 65 | old_ctx = read_c0_entryhi(); |
| 44 | write_c0_entrylo0(0); | 66 | write_c0_entrylo0(0); |
| @@ -57,7 +79,7 @@ void local_flush_tlb_all(void) | |||
| 57 | } | 79 | } |
| 58 | tlbw_use_hazard(); | 80 | tlbw_use_hazard(); |
| 59 | write_c0_entryhi(old_ctx); | 81 | write_c0_entryhi(old_ctx); |
| 60 | local_irq_restore(flags); | 82 | EXIT_CRITICAL(flags); |
| 61 | } | 83 | } |
| 62 | 84 | ||
| 63 | /* All entries common to a mm share an asid. To effectively flush | 85 | /* All entries common to a mm share an asid. To effectively flush |
| @@ -87,6 +109,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
| 87 | unsigned long flags; | 109 | unsigned long flags; |
| 88 | int size; | 110 | int size; |
| 89 | 111 | ||
| 112 | ENTER_CRITICAL(flags); | ||
| 90 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 113 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
| 91 | size = (size + 1) >> 1; | 114 | size = (size + 1) >> 1; |
| 92 | local_irq_save(flags); | 115 | local_irq_save(flags); |
| @@ -120,7 +143,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
| 120 | } else { | 143 | } else { |
| 121 | drop_mmu_context(mm, cpu); | 144 | drop_mmu_context(mm, cpu); |
| 122 | } | 145 | } |
| 123 | local_irq_restore(flags); | 146 | EXIT_CRITICAL(flags); |
| 124 | } | 147 | } |
| 125 | } | 148 | } |
| 126 | 149 | ||
| @@ -129,9 +152,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
| 129 | unsigned long flags; | 152 | unsigned long flags; |
| 130 | int size; | 153 | int size; |
| 131 | 154 | ||
| 155 | ENTER_CRITICAL(flags); | ||
| 132 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 156 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
| 133 | size = (size + 1) >> 1; | 157 | size = (size + 1) >> 1; |
| 134 | local_irq_save(flags); | ||
| 135 | if (size <= current_cpu_data.tlbsize / 2) { | 158 | if (size <= current_cpu_data.tlbsize / 2) { |
| 136 | int pid = read_c0_entryhi(); | 159 | int pid = read_c0_entryhi(); |
| 137 | 160 | ||
| @@ -162,7 +185,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
| 162 | } else { | 185 | } else { |
| 163 | local_flush_tlb_all(); | 186 | local_flush_tlb_all(); |
| 164 | } | 187 | } |
| 165 | local_irq_restore(flags); | 188 | EXIT_CRITICAL(flags); |
| 166 | } | 189 | } |
| 167 | 190 | ||
| 168 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | 191 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
| @@ -175,7 +198,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
| 175 | 198 | ||
| 176 | newpid = cpu_asid(cpu, vma->vm_mm); | 199 | newpid = cpu_asid(cpu, vma->vm_mm); |
| 177 | page &= (PAGE_MASK << 1); | 200 | page &= (PAGE_MASK << 1); |
| 178 | local_irq_save(flags); | 201 | ENTER_CRITICAL(flags); |
| 179 | oldpid = read_c0_entryhi(); | 202 | oldpid = read_c0_entryhi(); |
| 180 | write_c0_entryhi(page | newpid); | 203 | write_c0_entryhi(page | newpid); |
| 181 | mtc0_tlbw_hazard(); | 204 | mtc0_tlbw_hazard(); |
| @@ -194,7 +217,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
| 194 | 217 | ||
| 195 | finish: | 218 | finish: |
| 196 | write_c0_entryhi(oldpid); | 219 | write_c0_entryhi(oldpid); |
| 197 | local_irq_restore(flags); | 220 | EXIT_CRITICAL(flags); |
| 198 | } | 221 | } |
| 199 | } | 222 | } |
| 200 | 223 | ||
| @@ -207,7 +230,7 @@ void local_flush_tlb_one(unsigned long page) | |||
| 207 | unsigned long flags; | 230 | unsigned long flags; |
| 208 | int oldpid, idx; | 231 | int oldpid, idx; |
| 209 | 232 | ||
| 210 | local_irq_save(flags); | 233 | ENTER_CRITICAL(flags); |
| 211 | oldpid = read_c0_entryhi(); | 234 | oldpid = read_c0_entryhi(); |
| 212 | page &= (PAGE_MASK << 1); | 235 | page &= (PAGE_MASK << 1); |
| 213 | write_c0_entryhi(page); | 236 | write_c0_entryhi(page); |
| @@ -226,7 +249,7 @@ void local_flush_tlb_one(unsigned long page) | |||
| 226 | } | 249 | } |
| 227 | write_c0_entryhi(oldpid); | 250 | write_c0_entryhi(oldpid); |
| 228 | 251 | ||
| 229 | local_irq_restore(flags); | 252 | EXIT_CRITICAL(flags); |
| 230 | } | 253 | } |
| 231 | 254 | ||
| 232 | /* | 255 | /* |
| @@ -249,7 +272,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
| 249 | if (current->active_mm != vma->vm_mm) | 272 | if (current->active_mm != vma->vm_mm) |
| 250 | return; | 273 | return; |
| 251 | 274 | ||
| 252 | local_irq_save(flags); | 275 | ENTER_CRITICAL(flags); |
| 253 | 276 | ||
| 254 | pid = read_c0_entryhi() & ASID_MASK; | 277 | pid = read_c0_entryhi() & ASID_MASK; |
| 255 | address &= (PAGE_MASK << 1); | 278 | address &= (PAGE_MASK << 1); |
| @@ -277,7 +300,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
| 277 | else | 300 | else |
| 278 | tlb_write_indexed(); | 301 | tlb_write_indexed(); |
| 279 | tlbw_use_hazard(); | 302 | tlbw_use_hazard(); |
| 280 | local_irq_restore(flags); | 303 | EXIT_CRITICAL(flags); |
| 281 | } | 304 | } |
| 282 | 305 | ||
| 283 | #if 0 | 306 | #if 0 |
| @@ -291,7 +314,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma, | |||
| 291 | pte_t *ptep; | 314 | pte_t *ptep; |
| 292 | int idx; | 315 | int idx; |
| 293 | 316 | ||
| 294 | local_irq_save(flags); | 317 | ENTER_CRITICAL(flags); |
| 295 | address &= (PAGE_MASK << 1); | 318 | address &= (PAGE_MASK << 1); |
| 296 | asid = read_c0_entryhi() & ASID_MASK; | 319 | asid = read_c0_entryhi() & ASID_MASK; |
| 297 | write_c0_entryhi(address | asid); | 320 | write_c0_entryhi(address | asid); |
| @@ -310,7 +333,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma, | |||
| 310 | else | 333 | else |
| 311 | tlb_write_indexed(); | 334 | tlb_write_indexed(); |
| 312 | tlbw_use_hazard(); | 335 | tlbw_use_hazard(); |
| 313 | local_irq_restore(flags); | 336 | EXIT_CRITICAL(flags); |
| 314 | } | 337 | } |
| 315 | #endif | 338 | #endif |
| 316 | 339 | ||
| @@ -322,7 +345,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
| 322 | unsigned long old_pagemask; | 345 | unsigned long old_pagemask; |
| 323 | unsigned long old_ctx; | 346 | unsigned long old_ctx; |
| 324 | 347 | ||
| 325 | local_irq_save(flags); | 348 | ENTER_CRITICAL(flags); |
| 326 | /* Save old context and create impossible VPN2 value */ | 349 | /* Save old context and create impossible VPN2 value */ |
| 327 | old_ctx = read_c0_entryhi(); | 350 | old_ctx = read_c0_entryhi(); |
| 328 | old_pagemask = read_c0_pagemask(); | 351 | old_pagemask = read_c0_pagemask(); |
| @@ -342,7 +365,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
| 342 | BARRIER; | 365 | BARRIER; |
| 343 | write_c0_pagemask(old_pagemask); | 366 | write_c0_pagemask(old_pagemask); |
| 344 | local_flush_tlb_all(); | 367 | local_flush_tlb_all(); |
| 345 | local_irq_restore(flags); | 368 | EXIT_CRITICAL(flags); |
| 346 | } | 369 | } |
| 347 | 370 | ||
| 348 | /* | 371 | /* |
| @@ -362,7 +385,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
| 362 | unsigned long old_pagemask; | 385 | unsigned long old_pagemask; |
| 363 | unsigned long old_ctx; | 386 | unsigned long old_ctx; |
| 364 | 387 | ||
| 365 | local_irq_save(flags); | 388 | ENTER_CRITICAL(flags); |
| 366 | /* Save old context and create impossible VPN2 value */ | 389 | /* Save old context and create impossible VPN2 value */ |
| 367 | old_ctx = read_c0_entryhi(); | 390 | old_ctx = read_c0_entryhi(); |
| 368 | old_pagemask = read_c0_pagemask(); | 391 | old_pagemask = read_c0_pagemask(); |
| @@ -386,10 +409,11 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
| 386 | write_c0_entryhi(old_ctx); | 409 | write_c0_entryhi(old_ctx); |
| 387 | write_c0_pagemask(old_pagemask); | 410 | write_c0_pagemask(old_pagemask); |
| 388 | out: | 411 | out: |
| 389 | local_irq_restore(flags); | 412 | EXIT_CRITICAL(flags); |
| 390 | return ret; | 413 | return ret; |
| 391 | } | 414 | } |
| 392 | 415 | ||
| 416 | extern void __init sanitize_tlb_entries(void); | ||
| 393 | static void __init probe_tlb(unsigned long config) | 417 | static void __init probe_tlb(unsigned long config) |
| 394 | { | 418 | { |
| 395 | struct cpuinfo_mips *c = ¤t_cpu_data; | 419 | struct cpuinfo_mips *c = ¤t_cpu_data; |
| @@ -402,6 +426,14 @@ static void __init probe_tlb(unsigned long config) | |||
| 402 | */ | 426 | */ |
| 403 | if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY) | 427 | if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY) |
| 404 | return; | 428 | return; |
| 429 | #ifdef CONFIG_MIPS_MT_SMTC | ||
| 430 | /* | ||
| 431 | * If TLB is shared in SMTC system, total size already | ||
| 432 | * has been calculated and written into cpu_data tlbsize | ||
| 433 | */ | ||
| 434 | if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED) | ||
| 435 | return; | ||
| 436 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
| 405 | 437 | ||
| 406 | reg = read_c0_config1(); | 438 | reg = read_c0_config1(); |
| 407 | if (!((config >> 7) & 3)) | 439 | if (!((config >> 7) & 3)) |
| @@ -410,6 +442,15 @@ static void __init probe_tlb(unsigned long config) | |||
| 410 | c->tlbsize = ((reg >> 25) & 0x3f) + 1; | 442 | c->tlbsize = ((reg >> 25) & 0x3f) + 1; |
| 411 | } | 443 | } |
| 412 | 444 | ||
| 445 | static int __initdata ntlb = 0; | ||
| 446 | static int __init set_ntlb(char *str) | ||
| 447 | { | ||
| 448 | get_option(&str, &ntlb); | ||
| 449 | return 1; | ||
| 450 | } | ||
| 451 | |||
| 452 | __setup("ntlb=", set_ntlb); | ||
| 453 | |||
| 413 | void __init tlb_init(void) | 454 | void __init tlb_init(void) |
| 414 | { | 455 | { |
| 415 | unsigned int config = read_c0_config(); | 456 | unsigned int config = read_c0_config(); |
| @@ -432,5 +473,15 @@ void __init tlb_init(void) | |||
| 432 | 473 | ||
| 433 | /* Did I tell you that ARC SUCKS? */ | 474 | /* Did I tell you that ARC SUCKS? */ |
| 434 | 475 | ||
| 476 | if (ntlb) { | ||
| 477 | if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { | ||
| 478 | int wired = current_cpu_data.tlbsize - ntlb; | ||
| 479 | write_c0_wired(wired); | ||
| 480 | write_c0_index(wired-1); | ||
| 481 | printk ("Restricting TLB to %d entries\n", ntlb); | ||
| 482 | } else | ||
| 483 | printk("Ignoring invalid argument ntlb=%d\n", ntlb); | ||
| 484 | } | ||
| 485 | |||
| 435 | build_tlb_refill_handler(); | 486 | build_tlb_refill_handler(); |
| 436 | } | 487 | } |
