diff options
-rw-r--r-- | arch/powerpc/kernel/head_44x.S | 286 | ||||
-rw-r--r-- | arch/powerpc/kernel/head_booke.h | 8 | ||||
-rw-r--r-- | arch/powerpc/mm/44x_mmu.c | 29 | ||||
-rw-r--r-- | arch/powerpc/mm/fault.c | 3 | ||||
-rw-r--r-- | include/asm-powerpc/pgtable-ppc32.h | 61 |
5 files changed, 180 insertions, 207 deletions
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 2944529e8bf9..f3a1ea9d7fe4 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S | |||
@@ -293,119 +293,9 @@ interrupt_base: | |||
293 | MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception) | 293 | MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception) |
294 | 294 | ||
295 | /* Data Storage Interrupt */ | 295 | /* Data Storage Interrupt */ |
296 | START_EXCEPTION(DataStorage) | 296 | DATA_STORAGE_EXCEPTION |
297 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
298 | mtspr SPRN_SPRG1, r11 | ||
299 | mtspr SPRN_SPRG4W, r12 | ||
300 | mtspr SPRN_SPRG5W, r13 | ||
301 | mfcr r11 | ||
302 | mtspr SPRN_SPRG7W, r11 | ||
303 | |||
304 | /* | ||
305 | * Check if it was a store fault, if not then bail | ||
306 | * because a user tried to access a kernel or | ||
307 | * read-protected page. Otherwise, get the | ||
308 | * offending address and handle it. | ||
309 | */ | ||
310 | mfspr r10, SPRN_ESR | ||
311 | andis. r10, r10, ESR_ST@h | ||
312 | beq 2f | ||
313 | |||
314 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
315 | |||
316 | /* If we are faulting a kernel address, we have to use the | ||
317 | * kernel page tables. | ||
318 | */ | ||
319 | lis r11, PAGE_OFFSET@h | ||
320 | cmplw r10, r11 | ||
321 | blt+ 3f | ||
322 | lis r11, swapper_pg_dir@h | ||
323 | ori r11, r11, swapper_pg_dir@l | ||
324 | |||
325 | mfspr r12,SPRN_MMUCR | ||
326 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
327 | |||
328 | b 4f | ||
329 | |||
330 | /* Get the PGD for the current thread */ | ||
331 | 3: | ||
332 | mfspr r11,SPRN_SPRG3 | ||
333 | lwz r11,PGDIR(r11) | ||
334 | |||
335 | /* Load PID into MMUCR TID */ | ||
336 | mfspr r12,SPRN_MMUCR /* Get MMUCR */ | ||
337 | mfspr r13,SPRN_PID /* Get PID */ | ||
338 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
339 | |||
340 | 4: | ||
341 | mtspr SPRN_MMUCR,r12 | ||
342 | |||
343 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
344 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
345 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
346 | beq 2f /* Bail if no table */ | ||
347 | |||
348 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
349 | lwz r11, 4(r12) /* Get pte entry */ | ||
350 | |||
351 | andi. r13, r11, _PAGE_RW /* Is it writeable? */ | ||
352 | beq 2f /* Bail if not */ | ||
353 | |||
354 | /* Update 'changed'. | ||
355 | */ | ||
356 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
357 | stw r11, 4(r12) /* Update Linux page table */ | ||
358 | |||
359 | li r13, PPC44x_TLB_SR@l /* Set SR */ | ||
360 | rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | ||
361 | rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */ | ||
362 | rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */ | ||
363 | rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ | ||
364 | rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */ | ||
365 | and r12, r12, r11 /* HWEXEC/RW & USER */ | ||
366 | rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */ | ||
367 | rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */ | ||
368 | |||
369 | rlwimi r11,r13,0,26,31 /* Insert static perms */ | ||
370 | |||
371 | /* | ||
372 | * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added | ||
373 | * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see | ||
374 | * include/asm-powerpc/pgtable-ppc32.h for details). | ||
375 | */ | ||
376 | rlwinm r11,r11,0,20,10 | ||
377 | |||
378 | /* find the TLB index that caused the fault. It has to be here. */ | ||
379 | tlbsx r10, 0, r10 | ||
380 | |||
381 | tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
382 | |||
383 | /* Done...restore registers and get out of here. | ||
384 | */ | ||
385 | mfspr r11, SPRN_SPRG7R | ||
386 | mtcr r11 | ||
387 | mfspr r13, SPRN_SPRG5R | ||
388 | mfspr r12, SPRN_SPRG4R | ||
389 | 297 | ||
390 | mfspr r11, SPRN_SPRG1 | 298 | /* Instruction Storage Interrupt */ |
391 | mfspr r10, SPRN_SPRG0 | ||
392 | rfi /* Force context change */ | ||
393 | |||
394 | 2: | ||
395 | /* | ||
396 | * The bailout. Restore registers to pre-exception conditions | ||
397 | * and call the heavyweights to help us out. | ||
398 | */ | ||
399 | mfspr r11, SPRN_SPRG7R | ||
400 | mtcr r11 | ||
401 | mfspr r13, SPRN_SPRG5R | ||
402 | mfspr r12, SPRN_SPRG4R | ||
403 | |||
404 | mfspr r11, SPRN_SPRG1 | ||
405 | mfspr r10, SPRN_SPRG0 | ||
406 | b data_access | ||
407 | |||
408 | /* Instruction Storage Interrupt */ | ||
409 | INSTRUCTION_STORAGE_EXCEPTION | 299 | INSTRUCTION_STORAGE_EXCEPTION |
410 | 300 | ||
411 | /* External Input Interrupt */ | 301 | /* External Input Interrupt */ |
@@ -423,7 +313,6 @@ interrupt_base: | |||
423 | #else | 313 | #else |
424 | EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) | 314 | EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) |
425 | #endif | 315 | #endif |
426 | |||
427 | /* System Call Interrupt */ | 316 | /* System Call Interrupt */ |
428 | START_EXCEPTION(SystemCall) | 317 | START_EXCEPTION(SystemCall) |
429 | NORMAL_EXCEPTION_PROLOG | 318 | NORMAL_EXCEPTION_PROLOG |
@@ -484,18 +373,57 @@ interrupt_base: | |||
484 | 4: | 373 | 4: |
485 | mtspr SPRN_MMUCR,r12 | 374 | mtspr SPRN_MMUCR,r12 |
486 | 375 | ||
376 | /* Mask of required permission bits. Note that while we | ||
377 | * do copy ESR:ST to _PAGE_RW position as trying to write | ||
378 | * to an RO page is pretty common, we don't do it with | ||
379 | * _PAGE_DIRTY. We could do it, but it's a fairly rare | ||
380 | * event so I'd rather take the overhead when it happens | ||
381 | * rather than adding an instruction here. We should measure | ||
382 | * whether the whole thing is worth it in the first place | ||
383 | * as we could avoid loading SPRN_ESR completely in the first | ||
384 | * place... | ||
385 | * | ||
386 | * TODO: Is it worth doing that mfspr & rlwimi in the first | ||
387 | * place or can we save a couple of instructions here ? | ||
388 | */ | ||
389 | mfspr r12,SPRN_ESR | ||
390 | li r13,_PAGE_PRESENT|_PAGE_ACCESSED | ||
391 | rlwimi r13,r12,10,30,30 | ||
392 | |||
393 | /* Load the PTE */ | ||
487 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | 394 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ |
488 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | 395 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ |
489 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | 396 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ |
490 | beq 2f /* Bail if no table */ | 397 | beq 2f /* Bail if no table */ |
491 | 398 | ||
492 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | 399 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ |
493 | lwz r11, 4(r12) /* Get pte entry */ | 400 | lwz r11, 0(r12) /* Get high word of pte entry */ |
494 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | 401 | lwz r12, 4(r12) /* Get low word of pte entry */ |
495 | beq 2f /* Bail if not present */ | ||
496 | 402 | ||
497 | ori r11, r11, _PAGE_ACCESSED | 403 | lis r10,tlb_44x_index@ha |
498 | stw r11, 4(r12) | 404 | |
405 | andc. r13,r13,r12 /* Check permission */ | ||
406 | |||
407 | /* Load the next available TLB index */ | ||
408 | lwz r13,tlb_44x_index@l(r10) | ||
409 | |||
410 | bne 2f /* Bail if permission mismatch */ | ||
411 | |||
412 | /* Increment, rollover, and store TLB index */ | ||
413 | addi r13,r13,1 | ||
414 | |||
415 | /* Compare with watermark (instruction gets patched) */ | ||
416 | .globl tlb_44x_patch_hwater_D | ||
417 | tlb_44x_patch_hwater_D: | ||
418 | cmpwi 0,r13,1 /* reserve entries */ | ||
419 | ble 5f | ||
420 | li r13,0 | ||
421 | 5: | ||
422 | /* Store the next available TLB index */ | ||
423 | stw r13,tlb_44x_index@l(r10) | ||
424 | |||
425 | /* Re-load the faulting address */ | ||
426 | mfspr r10,SPRN_DEAR | ||
499 | 427 | ||
500 | /* Jump to common tlb load */ | 428 | /* Jump to common tlb load */ |
501 | b finish_tlb_load | 429 | b finish_tlb_load |
@@ -510,7 +438,7 @@ interrupt_base: | |||
510 | mfspr r12, SPRN_SPRG4R | 438 | mfspr r12, SPRN_SPRG4R |
511 | mfspr r11, SPRN_SPRG1 | 439 | mfspr r11, SPRN_SPRG1 |
512 | mfspr r10, SPRN_SPRG0 | 440 | mfspr r10, SPRN_SPRG0 |
513 | b data_access | 441 | b DataStorage |
514 | 442 | ||
515 | /* Instruction TLB Error Interrupt */ | 443 | /* Instruction TLB Error Interrupt */ |
516 | /* | 444 | /* |
@@ -554,18 +482,42 @@ interrupt_base: | |||
554 | 4: | 482 | 4: |
555 | mtspr SPRN_MMUCR,r12 | 483 | mtspr SPRN_MMUCR,r12 |
556 | 484 | ||
485 | /* Make up the required permissions */ | ||
486 | li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC | ||
487 | |||
557 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | 488 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ |
558 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | 489 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ |
559 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | 490 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ |
560 | beq 2f /* Bail if no table */ | 491 | beq 2f /* Bail if no table */ |
561 | 492 | ||
562 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | 493 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ |
563 | lwz r11, 4(r12) /* Get pte entry */ | 494 | lwz r11, 0(r12) /* Get high word of pte entry */ |
564 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | 495 | lwz r12, 4(r12) /* Get low word of pte entry */ |
565 | beq 2f /* Bail if not present */ | ||
566 | 496 | ||
567 | ori r11, r11, _PAGE_ACCESSED | 497 | lis r10,tlb_44x_index@ha |
568 | stw r11, 4(r12) | 498 | |
499 | andc. r13,r13,r12 /* Check permission */ | ||
500 | |||
501 | /* Load the next available TLB index */ | ||
502 | lwz r13,tlb_44x_index@l(r10) | ||
503 | |||
504 | bne 2f /* Bail if permission mismatch */ | ||
505 | |||
506 | /* Increment, rollover, and store TLB index */ | ||
507 | addi r13,r13,1 | ||
508 | |||
509 | /* Compare with watermark (instruction gets patched) */ | ||
510 | .globl tlb_44x_patch_hwater_I | ||
511 | tlb_44x_patch_hwater_I: | ||
512 | cmpwi 0,r13,1 /* reserve entries */ | ||
513 | ble 5f | ||
514 | li r13,0 | ||
515 | 5: | ||
516 | /* Store the next available TLB index */ | ||
517 | stw r13,tlb_44x_index@l(r10) | ||
518 | |||
519 | /* Re-load the faulting address */ | ||
520 | mfspr r10,SPRN_SRR0 | ||
569 | 521 | ||
570 | /* Jump to common TLB load point */ | 522 | /* Jump to common TLB load point */ |
571 | b finish_tlb_load | 523 | b finish_tlb_load |
@@ -587,86 +539,40 @@ interrupt_base: | |||
587 | 539 | ||
588 | /* | 540 | /* |
589 | * Local functions | 541 | * Local functions |
590 | */ | 542 | */ |
591 | /* | ||
592 | * Data TLB exceptions will bail out to this point | ||
593 | * if they can't resolve the lightweight TLB fault. | ||
594 | */ | ||
595 | data_access: | ||
596 | NORMAL_EXCEPTION_PROLOG | ||
597 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
598 | stw r5,_ESR(r11) | ||
599 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
600 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
601 | 543 | ||
602 | /* | 544 | /* |
603 | 545 | ||
604 | * Both the instruction and data TLB miss get to this | 546 | * Both the instruction and data TLB miss get to this |
605 | * point to load the TLB. | 547 | * point to load the TLB. |
606 | * r10 - EA of fault | 548 | * r10 - EA of fault |
607 | * r11 - available to use | 549 | * r11 - PTE high word value |
608 | * r12 - Pointer to the 64-bit PTE | 550 | * r12 - PTE low word value |
609 | * r13 - available to use | 551 | * r13 - TLB index |
610 | * MMUCR - loaded with proper value when we get here | 552 | * MMUCR - loaded with proper value when we get here |
611 | * Upon exit, we reload everything and RFI. | 553 | * Upon exit, we reload everything and RFI. |
612 | */ | 554 | */ |
613 | finish_tlb_load: | 555 | finish_tlb_load: |
614 | /* | 556 | /* Combine RPN & ERPN and write WS 0 */
615 | * We set execute, because we don't have the granularity to | 557 | rlwimi r11,r12,0,0,19 |
616 | * properly set this at the page level (Linux problem). | 558 | tlbwe r11,r13,PPC44x_TLB_XLAT |
617 | * If shared is set, we cause a zero PID->TID load. | ||
618 | * Many of these bits are software only. Bits we don't set | ||
619 | * here we (properly should) assume have the appropriate value. | ||
620 | */ | ||
621 | |||
622 | /* Load the next available TLB index */ | ||
623 | lis r13, tlb_44x_index@ha | ||
624 | lwz r13, tlb_44x_index@l(r13) | ||
625 | /* Load the TLB high watermark */ | ||
626 | lis r11, tlb_44x_hwater@ha | ||
627 | lwz r11, tlb_44x_hwater@l(r11) | ||
628 | |||
629 | /* Increment, rollover, and store TLB index */ | ||
630 | addi r13, r13, 1 | ||
631 | cmpw 0, r13, r11 /* reserve entries */ | ||
632 | ble 7f | ||
633 | li r13, 0 | ||
634 | 7: | ||
635 | /* Store the next available TLB index */ | ||
636 | lis r11, tlb_44x_index@ha | ||
637 | stw r13, tlb_44x_index@l(r11) | ||
638 | |||
639 | lwz r11, 0(r12) /* Get MS word of PTE */ | ||
640 | lwz r12, 4(r12) /* Get LS word of PTE */ | ||
641 | rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */ | ||
642 | tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */ | ||
643 | 559 | ||
644 | /* | 560 | /* |
645 | * Create PAGEID. This is the faulting address, | 561 | * Create WS1. This is the faulting address (EPN), |
646 | * page size, and valid flag. | 562 | * page size, and valid flag. |
647 | */ | 563 | */ |
648 | li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K | 564 | li r11,PPC44x_TLB_VALID | PPC44x_TLB_4K |
649 | rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */ | 565 | rlwimi r10,r11,0,20,31 /* Insert valid and page size*/ |
650 | tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */ | 566 | tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */ |
651 | 567 | ||
652 | li r10, PPC44x_TLB_SR@l /* Set SR */ | 568 | /* And WS 2 */ |
653 | rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */ | 569 | li r10,0xf85 /* Mask to apply from PTE */ |
654 | rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | 570 | rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */ |
655 | rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */ | 571 | and r11,r12,r10 /* Mask PTE bits to keep */ |
656 | rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ | 572 | andi. r10,r12,_PAGE_USER /* User page ? */ |
657 | and r11, r12, r11 /* HWEXEC & USER */ | 573 | beq 1f /* nope, leave U bits empty */ |
658 | rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ | 574 | rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */ |
659 | 575 | 1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */ | |
660 | rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ | ||
661 | |||
662 | /* | ||
663 | * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added | ||
664 | * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see | ||
665 | * include/asm-powerpc/pgtable-ppc32.h for details). | ||
666 | */ | ||
667 | rlwinm r12, r12, 0, 20, 10 | ||
668 | |||
669 | tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
670 | 576 | ||
671 | /* Done...restore registers and get out of here. | 577 | /* Done...restore registers and get out of here. |
672 | */ | 578 | */ |
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index b0461be1c928..fce2df988504 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h | |||
@@ -340,6 +340,14 @@ label: | |||
340 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 340 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
341 | EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) | 341 | EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) |
342 | 342 | ||
343 | #define DATA_STORAGE_EXCEPTION \ | ||
344 | START_EXCEPTION(DataStorage) \ | ||
345 | NORMAL_EXCEPTION_PROLOG; \ | ||
346 | mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ | ||
347 | stw r5,_ESR(r11); \ | ||
348 | mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \ | ||
349 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
350 | |||
343 | #define INSTRUCTION_STORAGE_EXCEPTION \ | 351 | #define INSTRUCTION_STORAGE_EXCEPTION \ |
344 | START_EXCEPTION(InstructionStorage) \ | 352 | START_EXCEPTION(InstructionStorage) \ |
345 | NORMAL_EXCEPTION_PROLOG; \ | 353 | NORMAL_EXCEPTION_PROLOG; \ |
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index 953fb919eb06..98052ac96580 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/mmu.h> | 27 | #include <asm/mmu.h> |
28 | #include <asm/system.h> | 28 | #include <asm/system.h> |
29 | #include <asm/page.h> | 29 | #include <asm/page.h> |
30 | #include <asm/cacheflush.h> | ||
30 | 31 | ||
31 | #include "mmu_decl.h" | 32 | #include "mmu_decl.h" |
32 | 33 | ||
@@ -37,11 +38,35 @@ unsigned int tlb_44x_index; /* = 0 */ | |||
37 | unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; | 38 | unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; |
38 | int icache_44x_need_flush; | 39 | int icache_44x_need_flush; |
39 | 40 | ||
41 | static void __init ppc44x_update_tlb_hwater(void) | ||
42 | { | ||
43 | extern unsigned int tlb_44x_patch_hwater_D[]; | ||
44 | extern unsigned int tlb_44x_patch_hwater_I[]; | ||
45 | |||
46 | /* The TLB miss handlers hard codes the watermark in a cmpli | ||
47 | * instruction to improve performances rather than loading it | ||
48 | * from the global variable. Thus, we patch the instructions | ||
49 | * in the 2 TLB miss handlers when updating the value | ||
50 | */ | ||
51 | tlb_44x_patch_hwater_D[0] = (tlb_44x_patch_hwater_D[0] & 0xffff0000) | | ||
52 | tlb_44x_hwater; | ||
53 | flush_icache_range((unsigned long)&tlb_44x_patch_hwater_D[0], | ||
54 | (unsigned long)&tlb_44x_patch_hwater_D[1]); | ||
55 | tlb_44x_patch_hwater_I[0] = (tlb_44x_patch_hwater_I[0] & 0xffff0000) | | ||
56 | tlb_44x_hwater; | ||
57 | flush_icache_range((unsigned long)&tlb_44x_patch_hwater_I[0], | ||
58 | (unsigned long)&tlb_44x_patch_hwater_I[1]); | ||
59 | } | ||
60 | |||
40 | /* | 61 | /* |
41 | * "Pins" a 256MB TLB entry in AS0 for kernel lowmem | 62 | * "Pins" a 256MB TLB entry in AS0 for kernel lowmem |
42 | */ | 63 | */ |
43 | static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) | 64 | static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) |
44 | { | 65 | { |
66 | unsigned int entry = tlb_44x_hwater--; | ||
67 | |||
68 | ppc44x_update_tlb_hwater(); | ||
69 | |||
45 | __asm__ __volatile__( | 70 | __asm__ __volatile__( |
46 | "tlbwe %2,%3,%4\n" | 71 | "tlbwe %2,%3,%4\n" |
47 | "tlbwe %1,%3,%5\n" | 72 | "tlbwe %1,%3,%5\n" |
@@ -50,7 +75,7 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) | |||
50 | : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), | 75 | : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), |
51 | "r" (phys), | 76 | "r" (phys), |
52 | "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), | 77 | "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), |
53 | "r" (tlb_44x_hwater--), /* slot for this TLB entry */ | 78 | "r" (entry), |
54 | "i" (PPC44x_TLB_PAGEID), | 79 | "i" (PPC44x_TLB_PAGEID), |
55 | "i" (PPC44x_TLB_XLAT), | 80 | "i" (PPC44x_TLB_XLAT), |
56 | "i" (PPC44x_TLB_ATTRIB)); | 81 | "i" (PPC44x_TLB_ATTRIB)); |
@@ -58,6 +83,8 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) | |||
58 | 83 | ||
59 | void __init MMU_init_hw(void) | 84 | void __init MMU_init_hw(void) |
60 | { | 85 | { |
86 | ppc44x_update_tlb_hwater(); | ||
87 | |||
61 | flush_instruction_cache(); | 88 | flush_instruction_cache(); |
62 | } | 89 | } |
63 | 90 | ||
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 7b2510799266..1707d00331fc 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -306,7 +306,8 @@ good_area: | |||
306 | flush_dcache_icache_page(page); | 306 | flush_dcache_icache_page(page); |
307 | set_bit(PG_arch_1, &page->flags); | 307 | set_bit(PG_arch_1, &page->flags); |
308 | } | 308 | } |
309 | pte_update(ptep, 0, _PAGE_HWEXEC); | 309 | pte_update(ptep, 0, _PAGE_HWEXEC | |
310 | _PAGE_ACCESSED); | ||
310 | _tlbie(address, mm->context.id); | 311 | _tlbie(address, mm->context.id); |
311 | pte_unmap_unlock(ptep, ptl); | 312 | pte_unmap_unlock(ptep, ptl); |
312 | up_read(&mm->mmap_sem); | 313 | up_read(&mm->mmap_sem); |
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h index e1d2bb57f1d5..11eede4a2906 100644 --- a/include/asm-powerpc/pgtable-ppc32.h +++ b/include/asm-powerpc/pgtable-ppc32.h | |||
@@ -182,6 +182,9 @@ extern int icache_44x_need_flush; | |||
182 | #define _PMD_SIZE_16M 0x0e0 | 182 | #define _PMD_SIZE_16M 0x0e0 |
183 | #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) | 183 | #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) |
184 | 184 | ||
185 | /* Until my rework is finished, 40x still needs atomic PTE updates */ | ||
186 | #define PTE_ATOMIC_UPDATES 1 | ||
187 | |||
185 | #elif defined(CONFIG_44x) | 188 | #elif defined(CONFIG_44x) |
186 | /* | 189 | /* |
187 | * Definitions for PPC440 | 190 | * Definitions for PPC440 |
@@ -253,17 +256,17 @@ extern int icache_44x_need_flush; | |||
253 | */ | 256 | */ |
254 | 257 | ||
255 | #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ | 258 | #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ |
256 | #define _PAGE_RW 0x00000002 /* S: Write permission */ | 259 | #define _PAGE_RW 0x00000002 /* S: Write permission */ |
257 | #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ | 260 | #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ |
261 | #define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */ | ||
258 | #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ | 262 | #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ |
259 | #define _PAGE_HWWRITE 0x00000010 /* H: Dirty & RW */ | 263 | #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */ |
260 | #define _PAGE_HWEXEC 0x00000020 /* H: Execute permission */ | 264 | #define _PAGE_USER 0x00000040 /* S: User page */ |
261 | #define _PAGE_USER 0x00000040 /* S: User page */ | 265 | #define _PAGE_ENDIAN 0x00000080 /* H: E bit */ |
262 | #define _PAGE_ENDIAN 0x00000080 /* H: E bit */ | 266 | #define _PAGE_GUARDED 0x00000100 /* H: G bit */ |
263 | #define _PAGE_GUARDED 0x00000100 /* H: G bit */ | 267 | #define _PAGE_COHERENT 0x00000200 /* H: M bit */ |
264 | #define _PAGE_DIRTY 0x00000200 /* S: Page dirty */ | 268 | #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ |
265 | #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ | 269 | #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ |
266 | #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ | ||
267 | 270 | ||
268 | /* TODO: Add large page lowmem mapping support */ | 271 | /* TODO: Add large page lowmem mapping support */ |
269 | #define _PMD_PRESENT 0 | 272 | #define _PMD_PRESENT 0 |
@@ -273,6 +276,7 @@ extern int icache_44x_need_flush; | |||
273 | /* ERPN in a PTE never gets cleared, ignore it */ | 276 | /* ERPN in a PTE never gets cleared, ignore it */ |
274 | #define _PTE_NONE_MASK 0xffffffff00000000ULL | 277 | #define _PTE_NONE_MASK 0xffffffff00000000ULL |
275 | 278 | ||
279 | |||
276 | #elif defined(CONFIG_FSL_BOOKE) | 280 | #elif defined(CONFIG_FSL_BOOKE) |
277 | /* | 281 | /* |
278 | MMU Assist Register 3: | 282 | MMU Assist Register 3: |
@@ -315,6 +319,9 @@ extern int icache_44x_need_flush; | |||
315 | #define _PMD_PRESENT_MASK (PAGE_MASK) | 319 | #define _PMD_PRESENT_MASK (PAGE_MASK) |
316 | #define _PMD_BAD (~PAGE_MASK) | 320 | #define _PMD_BAD (~PAGE_MASK) |
317 | 321 | ||
322 | /* Until my rework is finished, FSL BookE still needs atomic PTE updates */ | ||
323 | #define PTE_ATOMIC_UPDATES 1 | ||
324 | |||
318 | #elif defined(CONFIG_8xx) | 325 | #elif defined(CONFIG_8xx) |
319 | /* Definitions for 8xx embedded chips. */ | 326 | /* Definitions for 8xx embedded chips. */ |
320 | #define _PAGE_PRESENT 0x0001 /* Page is valid */ | 327 | #define _PAGE_PRESENT 0x0001 /* Page is valid */ |
@@ -345,6 +352,9 @@ extern int icache_44x_need_flush; | |||
345 | 352 | ||
346 | #define _PTE_NONE_MASK _PAGE_ACCESSED | 353 | #define _PTE_NONE_MASK _PAGE_ACCESSED |
347 | 354 | ||
355 | /* Until my rework is finished, 8xx still needs atomic PTE updates */ | ||
356 | #define PTE_ATOMIC_UPDATES 1 | ||
357 | |||
348 | #else /* CONFIG_6xx */ | 358 | #else /* CONFIG_6xx */ |
349 | /* Definitions for 60x, 740/750, etc. */ | 359 | /* Definitions for 60x, 740/750, etc. */ |
350 | #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ | 360 | #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ |
@@ -365,6 +375,10 @@ extern int icache_44x_need_flush; | |||
365 | #define _PMD_PRESENT 0 | 375 | #define _PMD_PRESENT 0 |
366 | #define _PMD_PRESENT_MASK (PAGE_MASK) | 376 | #define _PMD_PRESENT_MASK (PAGE_MASK) |
367 | #define _PMD_BAD (~PAGE_MASK) | 377 | #define _PMD_BAD (~PAGE_MASK) |
378 | |||
379 | /* Hash table based platforms need atomic updates of the linux PTE */ | ||
380 | #define PTE_ATOMIC_UPDATES 1 | ||
381 | |||
368 | #endif | 382 | #endif |
369 | 383 | ||
370 | /* | 384 | /* |
@@ -557,9 +571,11 @@ extern void add_hash_page(unsigned context, unsigned long va, | |||
557 | * low PTE word since we expect ALL flag bits to be there | 571 | * low PTE word since we expect ALL flag bits to be there |
558 | */ | 572 | */ |
559 | #ifndef CONFIG_PTE_64BIT | 573 | #ifndef CONFIG_PTE_64BIT |
560 | static inline unsigned long pte_update(pte_t *p, unsigned long clr, | 574 | static inline unsigned long pte_update(pte_t *p, |
575 | unsigned long clr, | ||
561 | unsigned long set) | 576 | unsigned long set) |
562 | { | 577 | { |
578 | #ifdef PTE_ATOMIC_UPDATES | ||
563 | unsigned long old, tmp; | 579 | unsigned long old, tmp; |
564 | 580 | ||
565 | __asm__ __volatile__("\ | 581 | __asm__ __volatile__("\ |
@@ -572,16 +588,26 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr, | |||
572 | : "=&r" (old), "=&r" (tmp), "=m" (*p) | 588 | : "=&r" (old), "=&r" (tmp), "=m" (*p) |
573 | : "r" (p), "r" (clr), "r" (set), "m" (*p) | 589 | : "r" (p), "r" (clr), "r" (set), "m" (*p) |
574 | : "cc" ); | 590 | : "cc" ); |
591 | #else /* PTE_ATOMIC_UPDATES */ | ||
592 | unsigned long old = pte_val(*p); | ||
593 | *p = __pte((old & ~clr) | set); | ||
594 | #endif /* !PTE_ATOMIC_UPDATES */ | ||
595 | |||
575 | #ifdef CONFIG_44x | 596 | #ifdef CONFIG_44x |
576 | if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) | 597 | if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) |
577 | icache_44x_need_flush = 1; | 598 | icache_44x_need_flush = 1; |
578 | #endif | 599 | #endif |
579 | return old; | 600 | return old; |
580 | } | 601 | } |
581 | #else | 602 | #else /* CONFIG_PTE_64BIT */ |
582 | static inline unsigned long long pte_update(pte_t *p, unsigned long clr, | 603 | /* TODO: Change that to only modify the low word and move set_pte_at() |
583 | unsigned long set) | 604 | * out of line |
605 | */ | ||
606 | static inline unsigned long long pte_update(pte_t *p, | ||
607 | unsigned long clr, | ||
608 | unsigned long set) | ||
584 | { | 609 | { |
610 | #ifdef PTE_ATOMIC_UPDATES | ||
585 | unsigned long long old; | 611 | unsigned long long old; |
586 | unsigned long tmp; | 612 | unsigned long tmp; |
587 | 613 | ||
@@ -596,13 +622,18 @@ static inline unsigned long long pte_update(pte_t *p, unsigned long clr, | |||
596 | : "=&r" (old), "=&r" (tmp), "=m" (*p) | 622 | : "=&r" (old), "=&r" (tmp), "=m" (*p) |
597 | : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) | 623 | : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) |
598 | : "cc" ); | 624 | : "cc" ); |
625 | #else /* PTE_ATOMIC_UPDATES */ | ||
626 | unsigned long long old = pte_val(*p); | ||
627 | *p = __pte((old & ~clr) | set); | ||
628 | #endif /* !PTE_ATOMIC_UPDATES */ | ||
629 | |||
599 | #ifdef CONFIG_44x | 630 | #ifdef CONFIG_44x |
600 | if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) | 631 | if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) |
601 | icache_44x_need_flush = 1; | 632 | icache_44x_need_flush = 1; |
602 | #endif | 633 | #endif |
603 | return old; | 634 | return old; |
604 | } | 635 | } |
605 | #endif | 636 | #endif /* CONFIG_PTE_64BIT */ |
606 | 637 | ||
607 | /* | 638 | /* |
608 | * set_pte stores a linux PTE into the linux page table. | 639 | * set_pte stores a linux PTE into the linux page table. |
@@ -671,7 +702,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) | |||
671 | ({ \ | 702 | ({ \ |
672 | int __changed = !pte_same(*(__ptep), __entry); \ | 703 | int __changed = !pte_same(*(__ptep), __entry); \ |
673 | if (__changed) { \ | 704 | if (__changed) { \ |
674 | __ptep_set_access_flags(__ptep, __entry, __dirty); \ | 705 | __ptep_set_access_flags(__ptep, __entry, __dirty); \ |
675 | flush_tlb_page_nohash(__vma, __address); \ | 706 | flush_tlb_page_nohash(__vma, __address); \ |
676 | } \ | 707 | } \ |
677 | __changed; \ | 708 | __changed; \ |