diff options
author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-08 01:54:40 -0400 |
---|---|---|
committer | Josh Boyer <jwboyer@linux.vnet.ibm.com> | 2008-07-09 13:36:17 -0400 |
commit | 1bc54c03117b90716e0dedd7abb2a20405de65df (patch) | |
tree | 8e82fd610abaff36f1e20b5aaaf7bdeaee883aac /arch/powerpc | |
parent | beae4c03c0fe69cf7d57518aa0572ad21730b8be (diff) |
powerpc: rework 4xx PTE access and TLB miss
This is some preliminary work to improve TLB management on SW loaded
TLB powerpc platforms. This introduces support for non-atomic PTE
operations in pgtable-ppc32.h and removes write back to the PTE from
the TLB miss handlers. In addition, the DSI interrupt code no longer
tries to fixup write permission, this is left to generic code, and
_PAGE_HWWRITE is gone.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/kernel/head_44x.S | 286 | ||||
-rw-r--r-- | arch/powerpc/kernel/head_booke.h | 8 | ||||
-rw-r--r-- | arch/powerpc/mm/44x_mmu.c | 29 | ||||
-rw-r--r-- | arch/powerpc/mm/fault.c | 3 |
4 files changed, 134 insertions, 192 deletions
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 2944529e8bf9..f3a1ea9d7fe4 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S | |||
@@ -293,119 +293,9 @@ interrupt_base: | |||
293 | MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception) | 293 | MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception) |
294 | 294 | ||
295 | /* Data Storage Interrupt */ | 295 | /* Data Storage Interrupt */ |
296 | START_EXCEPTION(DataStorage) | 296 | DATA_STORAGE_EXCEPTION |
297 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
298 | mtspr SPRN_SPRG1, r11 | ||
299 | mtspr SPRN_SPRG4W, r12 | ||
300 | mtspr SPRN_SPRG5W, r13 | ||
301 | mfcr r11 | ||
302 | mtspr SPRN_SPRG7W, r11 | ||
303 | |||
304 | /* | ||
305 | * Check if it was a store fault, if not then bail | ||
306 | * because a user tried to access a kernel or | ||
307 | * read-protected page. Otherwise, get the | ||
308 | * offending address and handle it. | ||
309 | */ | ||
310 | mfspr r10, SPRN_ESR | ||
311 | andis. r10, r10, ESR_ST@h | ||
312 | beq 2f | ||
313 | |||
314 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
315 | |||
316 | /* If we are faulting a kernel address, we have to use the | ||
317 | * kernel page tables. | ||
318 | */ | ||
319 | lis r11, PAGE_OFFSET@h | ||
320 | cmplw r10, r11 | ||
321 | blt+ 3f | ||
322 | lis r11, swapper_pg_dir@h | ||
323 | ori r11, r11, swapper_pg_dir@l | ||
324 | |||
325 | mfspr r12,SPRN_MMUCR | ||
326 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
327 | |||
328 | b 4f | ||
329 | |||
330 | /* Get the PGD for the current thread */ | ||
331 | 3: | ||
332 | mfspr r11,SPRN_SPRG3 | ||
333 | lwz r11,PGDIR(r11) | ||
334 | |||
335 | /* Load PID into MMUCR TID */ | ||
336 | mfspr r12,SPRN_MMUCR /* Get MMUCR */ | ||
337 | mfspr r13,SPRN_PID /* Get PID */ | ||
338 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
339 | |||
340 | 4: | ||
341 | mtspr SPRN_MMUCR,r12 | ||
342 | |||
343 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
344 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
345 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
346 | beq 2f /* Bail if no table */ | ||
347 | |||
348 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
349 | lwz r11, 4(r12) /* Get pte entry */ | ||
350 | |||
351 | andi. r13, r11, _PAGE_RW /* Is it writeable? */ | ||
352 | beq 2f /* Bail if not */ | ||
353 | |||
354 | /* Update 'changed'. | ||
355 | */ | ||
356 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
357 | stw r11, 4(r12) /* Update Linux page table */ | ||
358 | |||
359 | li r13, PPC44x_TLB_SR@l /* Set SR */ | ||
360 | rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | ||
361 | rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */ | ||
362 | rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */ | ||
363 | rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ | ||
364 | rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */ | ||
365 | and r12, r12, r11 /* HWEXEC/RW & USER */ | ||
366 | rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */ | ||
367 | rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */ | ||
368 | |||
369 | rlwimi r11,r13,0,26,31 /* Insert static perms */ | ||
370 | |||
371 | /* | ||
372 | * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added | ||
373 | * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see | ||
374 | * include/asm-powerpc/pgtable-ppc32.h for details). | ||
375 | */ | ||
376 | rlwinm r11,r11,0,20,10 | ||
377 | |||
378 | /* find the TLB index that caused the fault. It has to be here. */ | ||
379 | tlbsx r10, 0, r10 | ||
380 | |||
381 | tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
382 | |||
383 | /* Done...restore registers and get out of here. | ||
384 | */ | ||
385 | mfspr r11, SPRN_SPRG7R | ||
386 | mtcr r11 | ||
387 | mfspr r13, SPRN_SPRG5R | ||
388 | mfspr r12, SPRN_SPRG4R | ||
389 | 297 | ||
390 | mfspr r11, SPRN_SPRG1 | 298 | /* Instruction Storage Interrupt */ |
391 | mfspr r10, SPRN_SPRG0 | ||
392 | rfi /* Force context change */ | ||
393 | |||
394 | 2: | ||
395 | /* | ||
396 | * The bailout. Restore registers to pre-exception conditions | ||
397 | * and call the heavyweights to help us out. | ||
398 | */ | ||
399 | mfspr r11, SPRN_SPRG7R | ||
400 | mtcr r11 | ||
401 | mfspr r13, SPRN_SPRG5R | ||
402 | mfspr r12, SPRN_SPRG4R | ||
403 | |||
404 | mfspr r11, SPRN_SPRG1 | ||
405 | mfspr r10, SPRN_SPRG0 | ||
406 | b data_access | ||
407 | |||
408 | /* Instruction Storage Interrupt */ | ||
409 | INSTRUCTION_STORAGE_EXCEPTION | 299 | INSTRUCTION_STORAGE_EXCEPTION |
410 | 300 | ||
411 | /* External Input Interrupt */ | 301 | /* External Input Interrupt */ |
@@ -423,7 +313,6 @@ interrupt_base: | |||
423 | #else | 313 | #else |
424 | EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) | 314 | EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) |
425 | #endif | 315 | #endif |
426 | |||
427 | /* System Call Interrupt */ | 316 | /* System Call Interrupt */ |
428 | START_EXCEPTION(SystemCall) | 317 | START_EXCEPTION(SystemCall) |
429 | NORMAL_EXCEPTION_PROLOG | 318 | NORMAL_EXCEPTION_PROLOG |
@@ -484,18 +373,57 @@ interrupt_base: | |||
484 | 4: | 373 | 4: |
485 | mtspr SPRN_MMUCR,r12 | 374 | mtspr SPRN_MMUCR,r12 |
486 | 375 | ||
376 | /* Mask of required permission bits. Note that while we | ||
377 | * do copy ESR:ST to _PAGE_RW position as trying to write | ||
378 | * to an RO page is pretty common, we don't do it with | ||
379 | * _PAGE_DIRTY. We could do it, but it's a fairly rare | ||
380 | * event so I'd rather take the overhead when it happens | ||
381 | * rather than adding an instruction here. We should measure | ||
382 | * whether the whole thing is worth it in the first place | ||
383 | * as we could avoid loading SPRN_ESR completely in the first | ||
384 | * place... | ||
385 | * | ||
386 | * TODO: Is it worth doing that mfspr & rlwimi in the first | ||
387 | * place or can we save a couple of instructions here ? | ||
388 | */ | ||
389 | mfspr r12,SPRN_ESR | ||
390 | li r13,_PAGE_PRESENT|_PAGE_ACCESSED | ||
391 | rlwimi r13,r12,10,30,30 | ||
392 | |||
393 | /* Load the PTE */ | ||
487 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | 394 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ |
488 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | 395 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ |
489 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | 396 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ |
490 | beq 2f /* Bail if no table */ | 397 | beq 2f /* Bail if no table */ |
491 | 398 | ||
492 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | 399 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ |
493 | lwz r11, 4(r12) /* Get pte entry */ | 400 | lwz r11, 0(r12) /* Get high word of pte entry */ |
494 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | 401 | lwz r12, 4(r12) /* Get low word of pte entry */ |
495 | beq 2f /* Bail if not present */ | ||
496 | 402 | ||
497 | ori r11, r11, _PAGE_ACCESSED | 403 | lis r10,tlb_44x_index@ha |
498 | stw r11, 4(r12) | 404 | |
405 | andc. r13,r13,r12 /* Check permission */ | ||
406 | |||
407 | /* Load the next available TLB index */ | ||
408 | lwz r13,tlb_44x_index@l(r10) | ||
409 | |||
410 | bne 2f /* Bail if permission mismatch */ | ||
411 | |||
412 | /* Increment, rollover, and store TLB index */ | ||
413 | addi r13,r13,1 | ||
414 | |||
415 | /* Compare with watermark (instruction gets patched) */ | ||
416 | .globl tlb_44x_patch_hwater_D | ||
417 | tlb_44x_patch_hwater_D: | ||
418 | cmpwi 0,r13,1 /* reserve entries */ | ||
419 | ble 5f | ||
420 | li r13,0 | ||
421 | 5: | ||
422 | /* Store the next available TLB index */ | ||
423 | stw r13,tlb_44x_index@l(r10) | ||
424 | |||
425 | /* Re-load the faulting address */ | ||
426 | mfspr r10,SPRN_DEAR | ||
499 | 427 | ||
500 | /* Jump to common tlb load */ | 428 | /* Jump to common tlb load */ |
501 | b finish_tlb_load | 429 | b finish_tlb_load |
@@ -510,7 +438,7 @@ interrupt_base: | |||
510 | mfspr r12, SPRN_SPRG4R | 438 | mfspr r12, SPRN_SPRG4R |
511 | mfspr r11, SPRN_SPRG1 | 439 | mfspr r11, SPRN_SPRG1 |
512 | mfspr r10, SPRN_SPRG0 | 440 | mfspr r10, SPRN_SPRG0 |
513 | b data_access | 441 | b DataStorage |
514 | 442 | ||
515 | /* Instruction TLB Error Interrupt */ | 443 | /* Instruction TLB Error Interrupt */ |
516 | /* | 444 | /* |
@@ -554,18 +482,42 @@ interrupt_base: | |||
554 | 4: | 482 | 4: |
555 | mtspr SPRN_MMUCR,r12 | 483 | mtspr SPRN_MMUCR,r12 |
556 | 484 | ||
485 | /* Make up the required permissions */ | ||
486 | li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC | ||
487 | |||
557 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | 488 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ |
558 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | 489 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ |
559 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | 490 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ |
560 | beq 2f /* Bail if no table */ | 491 | beq 2f /* Bail if no table */ |
561 | 492 | ||
562 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | 493 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ |
563 | lwz r11, 4(r12) /* Get pte entry */ | 494 | lwz r11, 0(r12) /* Get high word of pte entry */ |
564 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | 495 | lwz r12, 4(r12) /* Get low word of pte entry */ |
565 | beq 2f /* Bail if not present */ | ||
566 | 496 | ||
567 | ori r11, r11, _PAGE_ACCESSED | 497 | lis r10,tlb_44x_index@ha |
568 | stw r11, 4(r12) | 498 | |
499 | andc. r13,r13,r12 /* Check permission */ | ||
500 | |||
501 | /* Load the next available TLB index */ | ||
502 | lwz r13,tlb_44x_index@l(r10) | ||
503 | |||
504 | bne 2f /* Bail if permission mismatch */ | ||
505 | |||
506 | /* Increment, rollover, and store TLB index */ | ||
507 | addi r13,r13,1 | ||
508 | |||
509 | /* Compare with watermark (instruction gets patched) */ | ||
510 | .globl tlb_44x_patch_hwater_I | ||
511 | tlb_44x_patch_hwater_I: | ||
512 | cmpwi 0,r13,1 /* reserve entries */ | ||
513 | ble 5f | ||
514 | li r13,0 | ||
515 | 5: | ||
516 | /* Store the next available TLB index */ | ||
517 | stw r13,tlb_44x_index@l(r10) | ||
518 | |||
519 | /* Re-load the faulting address */ | ||
520 | mfspr r10,SPRN_SRR0 | ||
569 | 521 | ||
570 | /* Jump to common TLB load point */ | 522 | /* Jump to common TLB load point */ |
571 | b finish_tlb_load | 523 | b finish_tlb_load |
@@ -587,86 +539,40 @@ interrupt_base: | |||
587 | 539 | ||
588 | /* | 540 | /* |
589 | * Local functions | 541 | * Local functions |
590 | */ | 542 | */ |
591 | /* | ||
592 | * Data TLB exceptions will bail out to this point | ||
593 | * if they can't resolve the lightweight TLB fault. | ||
594 | */ | ||
595 | data_access: | ||
596 | NORMAL_EXCEPTION_PROLOG | ||
597 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
598 | stw r5,_ESR(r11) | ||
599 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
600 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
601 | 543 | ||
602 | /* | 544 | /* |
603 | 545 | ||
604 | * Both the instruction and data TLB miss get to this | 546 | * Both the instruction and data TLB miss get to this |
605 | * point to load the TLB. | 547 | * point to load the TLB. |
606 | * r10 - EA of fault | 548 | * r10 - EA of fault |
607 | * r11 - available to use | 549 | * r11 - PTE high word value |
608 | * r12 - Pointer to the 64-bit PTE | 550 | * r12 - PTE low word value |
609 | * r13 - available to use | 551 | * r13 - TLB index |
610 | * MMUCR - loaded with proper value when we get here | 552 | * MMUCR - loaded with proper value when we get here |
611 | * Upon exit, we reload everything and RFI. | 553 | * Upon exit, we reload everything and RFI. |
612 | */ | 554 | */ |
613 | finish_tlb_load: | 555 | finish_tlb_load: |
614 | /* | 556 | /* Combine RPN & ERPN and write WS 0 */ |
615 | * We set execute, because we don't have the granularity to | 557 | rlwimi r11,r12,0,0,19 |
616 | * properly set this at the page level (Linux problem). | 558 | tlbwe r11,r13,PPC44x_TLB_XLAT |
617 | * If shared is set, we cause a zero PID->TID load. | ||
618 | * Many of these bits are software only. Bits we don't set | ||
619 | * here we (properly should) assume have the appropriate value. | ||
620 | */ | ||
621 | |||
622 | /* Load the next available TLB index */ | ||
623 | lis r13, tlb_44x_index@ha | ||
624 | lwz r13, tlb_44x_index@l(r13) | ||
625 | /* Load the TLB high watermark */ | ||
626 | lis r11, tlb_44x_hwater@ha | ||
627 | lwz r11, tlb_44x_hwater@l(r11) | ||
628 | |||
629 | /* Increment, rollover, and store TLB index */ | ||
630 | addi r13, r13, 1 | ||
631 | cmpw 0, r13, r11 /* reserve entries */ | ||
632 | ble 7f | ||
633 | li r13, 0 | ||
634 | 7: | ||
635 | /* Store the next available TLB index */ | ||
636 | lis r11, tlb_44x_index@ha | ||
637 | stw r13, tlb_44x_index@l(r11) | ||
638 | |||
639 | lwz r11, 0(r12) /* Get MS word of PTE */ | ||
640 | lwz r12, 4(r12) /* Get LS word of PTE */ | ||
641 | rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */ | ||
642 | tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */ | ||
643 | 559 | ||
644 | /* | 560 | /* |
645 | * Create PAGEID. This is the faulting address, | 561 | * Create WS1. This is the faulting address (EPN), |
646 | * page size, and valid flag. | 562 | * page size, and valid flag. |
647 | */ | 563 | */ |
648 | li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K | 564 | li r11,PPC44x_TLB_VALID | PPC44x_TLB_4K |
649 | rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */ | 565 | rlwimi r10,r11,0,20,31 /* Insert valid and page size*/ |
650 | tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */ | 566 | tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */ |
651 | 567 | ||
652 | li r10, PPC44x_TLB_SR@l /* Set SR */ | 568 | /* And WS 2 */ |
653 | rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */ | 569 | li r10,0xf85 /* Mask to apply from PTE */ |
654 | rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | 570 | rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */ |
655 | rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */ | 571 | and r11,r12,r10 /* Mask PTE bits to keep */ |
656 | rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ | 572 | andi. r10,r12,_PAGE_USER /* User page ? */ |
657 | and r11, r12, r11 /* HWEXEC & USER */ | 573 | beq 1f /* nope, leave U bits empty */ |
658 | rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ | 574 | rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */ |
659 | 575 | 1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */ | |
660 | rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ | ||
661 | |||
662 | /* | ||
663 | * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added | ||
664 | * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see | ||
665 | * include/asm-powerpc/pgtable-ppc32.h for details). | ||
666 | */ | ||
667 | rlwinm r12, r12, 0, 20, 10 | ||
668 | |||
669 | tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
670 | 576 | ||
671 | /* Done...restore registers and get out of here. | 577 | /* Done...restore registers and get out of here. |
672 | */ | 578 | */ |
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index b0461be1c928..fce2df988504 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h | |||
@@ -340,6 +340,14 @@ label: | |||
340 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 340 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
341 | EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) | 341 | EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) |
342 | 342 | ||
343 | #define DATA_STORAGE_EXCEPTION \ | ||
344 | START_EXCEPTION(DataStorage) \ | ||
345 | NORMAL_EXCEPTION_PROLOG; \ | ||
346 | mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ | ||
347 | stw r5,_ESR(r11); \ | ||
348 | mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \ | ||
349 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
350 | |||
343 | #define INSTRUCTION_STORAGE_EXCEPTION \ | 351 | #define INSTRUCTION_STORAGE_EXCEPTION \ |
344 | START_EXCEPTION(InstructionStorage) \ | 352 | START_EXCEPTION(InstructionStorage) \ |
345 | NORMAL_EXCEPTION_PROLOG; \ | 353 | NORMAL_EXCEPTION_PROLOG; \ |
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index 953fb919eb06..98052ac96580 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/mmu.h> | 27 | #include <asm/mmu.h> |
28 | #include <asm/system.h> | 28 | #include <asm/system.h> |
29 | #include <asm/page.h> | 29 | #include <asm/page.h> |
30 | #include <asm/cacheflush.h> | ||
30 | 31 | ||
31 | #include "mmu_decl.h" | 32 | #include "mmu_decl.h" |
32 | 33 | ||
@@ -37,11 +38,35 @@ unsigned int tlb_44x_index; /* = 0 */ | |||
37 | unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; | 38 | unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; |
38 | int icache_44x_need_flush; | 39 | int icache_44x_need_flush; |
39 | 40 | ||
41 | static void __init ppc44x_update_tlb_hwater(void) | ||
42 | { | ||
43 | extern unsigned int tlb_44x_patch_hwater_D[]; | ||
44 | extern unsigned int tlb_44x_patch_hwater_I[]; | ||
45 | |||
46 | /* The TLB miss handlers hard codes the watermark in a cmpli | ||
47 | * instruction to improve performances rather than loading it | ||
48 | * from the global variable. Thus, we patch the instructions | ||
49 | * in the 2 TLB miss handlers when updating the value | ||
50 | */ | ||
51 | tlb_44x_patch_hwater_D[0] = (tlb_44x_patch_hwater_D[0] & 0xffff0000) | | ||
52 | tlb_44x_hwater; | ||
53 | flush_icache_range((unsigned long)&tlb_44x_patch_hwater_D[0], | ||
54 | (unsigned long)&tlb_44x_patch_hwater_D[1]); | ||
55 | tlb_44x_patch_hwater_I[0] = (tlb_44x_patch_hwater_I[0] & 0xffff0000) | | ||
56 | tlb_44x_hwater; | ||
57 | flush_icache_range((unsigned long)&tlb_44x_patch_hwater_I[0], | ||
58 | (unsigned long)&tlb_44x_patch_hwater_I[1]); | ||
59 | } | ||
60 | |||
40 | /* | 61 | /* |
41 | * "Pins" a 256MB TLB entry in AS0 for kernel lowmem | 62 | * "Pins" a 256MB TLB entry in AS0 for kernel lowmem |
42 | */ | 63 | */ |
43 | static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) | 64 | static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) |
44 | { | 65 | { |
66 | unsigned int entry = tlb_44x_hwater--; | ||
67 | |||
68 | ppc44x_update_tlb_hwater(); | ||
69 | |||
45 | __asm__ __volatile__( | 70 | __asm__ __volatile__( |
46 | "tlbwe %2,%3,%4\n" | 71 | "tlbwe %2,%3,%4\n" |
47 | "tlbwe %1,%3,%5\n" | 72 | "tlbwe %1,%3,%5\n" |
@@ -50,7 +75,7 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) | |||
50 | : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), | 75 | : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), |
51 | "r" (phys), | 76 | "r" (phys), |
52 | "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), | 77 | "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), |
53 | "r" (tlb_44x_hwater--), /* slot for this TLB entry */ | 78 | "r" (entry), |
54 | "i" (PPC44x_TLB_PAGEID), | 79 | "i" (PPC44x_TLB_PAGEID), |
55 | "i" (PPC44x_TLB_XLAT), | 80 | "i" (PPC44x_TLB_XLAT), |
56 | "i" (PPC44x_TLB_ATTRIB)); | 81 | "i" (PPC44x_TLB_ATTRIB)); |
@@ -58,6 +83,8 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) | |||
58 | 83 | ||
59 | void __init MMU_init_hw(void) | 84 | void __init MMU_init_hw(void) |
60 | { | 85 | { |
86 | ppc44x_update_tlb_hwater(); | ||
87 | |||
61 | flush_instruction_cache(); | 88 | flush_instruction_cache(); |
62 | } | 89 | } |
63 | 90 | ||
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 7b2510799266..1707d00331fc 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -306,7 +306,8 @@ good_area: | |||
306 | flush_dcache_icache_page(page); | 306 | flush_dcache_icache_page(page); |
307 | set_bit(PG_arch_1, &page->flags); | 307 | set_bit(PG_arch_1, &page->flags); |
308 | } | 308 | } |
309 | pte_update(ptep, 0, _PAGE_HWEXEC); | 309 | pte_update(ptep, 0, _PAGE_HWEXEC | |
310 | _PAGE_ACCESSED); | ||
310 | _tlbie(address, mm->context.id); | 311 | _tlbie(address, mm->context.id); |
311 | pte_unmap_unlock(ptep, ptl); | 312 | pte_unmap_unlock(ptep, ptl); |
312 | up_read(&mm->mmap_sem); | 313 | up_read(&mm->mmap_sem); |