author		Joakim Tjernlund <joakim.tjernlund@transmode.se>	2009-11-19 19:21:03 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-12-09 01:10:36 -0500
commit		fe11dc3f9628e5393e932567b7e29d35cbbad136 (patch)
tree		32b2513f5799946d42a7b4c912c0fa094763810f /arch
parent		5efab4a02c89c252fb4cce097aafde5f8208dbfe (diff)
powerpc/8xx: Update TLB asm so it behaves as linux mm expects.
Update the TLB asm to make proper use of _PAGE_DIRTY and _PAGE_ACCESSED.
Get rid of _PAGE_HWWRITE too.

Pros:
 - I/D TLB Miss never needs to write to the linux pte.
 - _PAGE_ACCESSED is only set on TLB Error, fixing accounting.
 - _PAGE_DIRTY is mapped to 0x100, the changed bit, and is set directly
   when a page has been made dirty.
 - Proper RO/RW mapping of user space.
 - Free up 2 SW TLB bits in the linux pte (add back _PAGE_WRITETHRU?).
 - Kernel RO/user NA support.

Cons:
 - A few more instructions in the TLB Miss routines.

Signed-off-by: Joakim Tjernlund <Joakim.Tjernlund@transmode.se>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/pte-8xx.h	13
-rw-r--r--	arch/powerpc/kernel/head_8xx.S	99
2 files changed, 57 insertions, 55 deletions
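
In outline, the accounting policy after this patch can be sketched in C as
below (hypothetical helper names for illustration only, using the _PAGE_*
values from the new pte-8xx.h further down; the real work is done in the
assembler that follows):

/* Before: every I/D TLB miss wrote _PAGE_ACCESSED back into the Linux PTE. */
static inline void old_tlb_miss_account(unsigned long *ptep)
{
	*ptep |= _PAGE_ACCESSED;	/* a page-table store on every miss */
}

/* After: a miss only filters.  If accounting is still needed it bounces to
 * the TLB Error path, and _PAGE_ACCESSED/_PAGE_DIRTY get set there instead.
 */
static inline int new_tlb_miss_ok(unsigned long pte)
{
	return (pte & (_PAGE_PRESENT | _PAGE_ACCESSED)) ==
	       (_PAGE_PRESENT | _PAGE_ACCESSED);
}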
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index dd5ea95fe61e..68ba861331ee 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -33,21 +33,20 @@
 #define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
 #define _PAGE_SHARED	0x0004	/* No ASID (context) compare */
 #define _PAGE_SPECIAL	0x0008	/* SW entry, forced to 0 by the TLB miss */
+#define _PAGE_DIRTY	0x0100	/* C: page changed */
 
-/* These five software bits must be masked out when the entry is loaded
- * into the TLB.
+/* These 3 software bits must be masked out when the entry is loaded
+ * into the TLB, 2 SW bits left.
  */
 #define _PAGE_GUARDED	0x0010	/* software: guarded access */
-#define _PAGE_DIRTY	0x0020	/* software: page changed */
-#define _PAGE_RW	0x0040	/* software: user write access allowed */
-#define _PAGE_ACCESSED	0x0080	/* software: page referenced */
+#define _PAGE_ACCESSED	0x0020	/* software: page referenced */
 
 /* Setting any bits in the nibble with the follow two controls will
  * require a TLB exception handler change.  It is assumed unused bits
  * are always zero.
  */
-#define _PAGE_HWWRITE	0x0100	/* h/w write enable: never set in Linux PTE */
-#define _PAGE_USER	0x0800	/* One of the PP bits, the other is USER&~RW */
+#define _PAGE_RW	0x0400	/* lsb PP bits, inverted in HW */
+#define _PAGE_USER	0x0800	/* msb PP bits */
 
 #define _PMD_PRESENT	0x0001
 #define _PMD_BAD	0x0ff0
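
Read together with the handlers below, the new layout means that _PAGE_DIRTY
now sits on the hardware "changed" bit, _PAGE_RW/_PAGE_USER sit on the PP
field (with RW inverted by the handlers), and the remaining software bits
live in a region the handlers overwrite anyway. As illustrative C only (these
derived macros do not exist in the tree):

/* bits that now line up with the hardware TLB word */
#define _PTE_HW_BITS	(_PAGE_DIRTY | _PAGE_RW | _PAGE_USER)

/* software-only bits; they fall inside bits 24-28, which the handlers
 * rewrite with "li r11, 0x00f0; rlwimi r10, r11, 0, 24, 28" before the
 * mtspr, so they never reach the MMU */
#define _PTE_SW_BITS	(_PAGE_GUARDED | _PAGE_ACCESSED)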
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 6ded19d01891..97bd523a0278 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -333,26 +333,20 @@ InstructionTLBMiss:
 	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
 	lwz	r10, 0(r11)	/* Get the pte */
 
-#ifdef CONFIG_SWAP
-	/* do not set the _PAGE_ACCESSED bit of a non-present page */
-	andi.	r11, r10, _PAGE_PRESENT
-	beq	4f
-	ori	r10, r10, _PAGE_ACCESSED
-	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
-	stw	r10, 0(r11)
-4:
-#else
-	ori	r10, r10, _PAGE_ACCESSED
-	stw	r10, 0(r11)
-#endif
+	andi.	r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
+	cmpwi	cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
+	bne-	cr0, 2f
+
+	/* Clear PP lsb, 0x400 */
+	rlwinm	r10, r10, 0, 22, 20
 
 	/* The Linux PTE won't go exactly into the MMU TLB.
-	 * Software indicator bits 21, 22 and 28 must be clear.
+	 * Software indicator bits 22 and 28 must be clear.
 	 * Software indicator bits 24, 25, 26, and 27 must be
 	 * set.  All other Linux PTE bits control the behavior
 	 * of the MMU.
 	 */
-2:	li	r11, 0x00f0
+	li	r11, 0x00f0
 	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
 	DO_8xx_CPU6(0x2d80, r3)
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
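
The new InstructionTLBMiss body is roughly the following C (a sketch only;
the helper name and return convention are made up here):

static inline int itlb_prepare(unsigned long pte, unsigned long *rpn)
{
	/* both bits must already be set, otherwise fall through to the
	 * error path at 2: and let the generic fault code do the work */
	if ((pte & (_PAGE_ACCESSED | _PAGE_PRESENT)) !=
	    (_PAGE_ACCESSED | _PAGE_PRESENT))
		return -1;

	pte &= ~0x400UL;		/* rlwinm r10,r10,0,22,20: clear PP lsb */
	*rpn = (pte & ~0xf8UL) | 0xf0;	/* li 0x00f0; rlwimi ...,0,24,28 */
	return 0;			/* *rpn is what goes into SPRN_MI_RPN */
}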
@@ -365,6 +359,22 @@ InstructionTLBMiss:
 	lwz	r3, 8(r0)
 #endif
 	rfi
+2:
+	mfspr	r11, SPRN_SRR1
+	/* clear all error bits as TLB Miss
+	 * sets a few unconditionally
+	 */
+	rlwinm	r11, r11, 0, 0xffff
+	mtspr	SPRN_SRR1, r11
+
+	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	lwz	r11, 0(r0)
+	mtcr	r11
+	lwz	r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+	lwz	r3, 8(r0)
+#endif
+	b	InstructionAccess
 
 	. = 0x1200
 DataStoreTLBMiss:
@@ -409,21 +419,27 @@ DataStoreTLBMiss:
 	DO_8xx_CPU6(0x3b80, r3)
 	mtspr	SPRN_MD_TWC, r11
 
-#ifdef CONFIG_SWAP
-	/* do not set the _PAGE_ACCESSED bit of a non-present page */
-	andi.	r11, r10, _PAGE_PRESENT
-	beq	4f
-	ori	r10, r10, _PAGE_ACCESSED
-4:
-	/* and update pte in table */
-#else
-	ori	r10, r10, _PAGE_ACCESSED
-#endif
-	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
-	stw	r10, 0(r11)
+	/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
+	 * We also need to know if the insn is a load/store, so:
+	 * Clear _PAGE_PRESENT and load that which will
+	 * trap into DTLB Error with store bit set accordinly.
+	 */
+	/* PRESENT=0x1, ACCESSED=0x20
+	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
+	 * r10 = (r10 & ~PRESENT) | r11;
+	 */
+	rlwinm	r11, r10, 32-5, 31, 31
+	and	r11, r11, r10
+	rlwimi	r10, r11, 0, 31, 31
+
+	/* Honour kernel RO, User NA */
+	andi.	r11, r10, _PAGE_USER | _PAGE_RW
+	bne-	cr0, 5f
+	ori	r10, r10, 0x200	/* Extended encoding, bit 22 */
+5:	xori	r10, r10, _PAGE_RW	/* invert RW bit */
 
 	/* The Linux PTE won't go exactly into the MMU TLB.
-	 * Software indicator bits 21, 22 and 28 must be clear.
+	 * Software indicator bits 22 and 28 must be clear.
 	 * Software indicator bits 24, 25, 26, and 27 must be
 	 * set.  All other Linux PTE bits control the behavior
 	 * of the MMU.
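
The comment block above already gives the PRESENT/ACCESSED trick in C; spelled
out together with the kernel-RO/user-NA handling that follows it (sketch only,
dtlb_prepare() is not a real function):

static inline unsigned long dtlb_prepare(unsigned long pte)
{
	unsigned long tmp;

	/* keep _PAGE_PRESENT only if _PAGE_ACCESSED is also set, so a
	 * not-yet-accessed page traps into DTLB Error instead */
	tmp = (pte >> 5) & 0x1;		/* rlwinm r11, r10, 32-5, 31, 31 */
	tmp &= pte;			/* and    r11, r11, r10          */
	pte = (pte & ~0x1UL) | tmp;	/* rlwimi r10, r11, 0, 31, 31    */

	/* kernel RO / user NA: with neither _PAGE_USER nor _PAGE_RW set,
	 * use the extended PP encoding (bit 22, 0x200) */
	if (!(pte & (_PAGE_USER | _PAGE_RW)))
		pte |= 0x200;

	return pte ^ _PAGE_RW;		/* PP lsb is inverted in hardware */
}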
@@ -469,11 +485,12 @@ DataTLBError:
 	stw	r10, 0(r0)
 	stw	r11, 4(r0)
 
-	/* First, make sure this was a store operation.
+	mfspr	r11, SPRN_DSISR
+	andis.	r11, r11, 0x4800	/* !translation or protection */
+	bne	2f			/* branch if either is set */
+	/* Only Change bit left now, do it here as it is faster
+	 * than trapping to the C fault handler.
 	 */
-	mfspr	r10, SPRN_DSISR
-	andis.	r11, r10, 0x0200	/* If set, indicates store op */
-	beq	2f
 
 	/* The EA of a data TLB miss is automatically stored in the MD_EPN
 	 * register.  The EA of a data TLB error is automatically stored in
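
The new DSISR test reads, in C terms (sketch; 0x4800 in the andis. is the
upper half-word, i.e. DSISR 0x40000000 "translation not found" and
0x08000000 "protection violation"):

static inline int dtlb_error_fast_path(unsigned long dsisr)
{
	/* either condition means real fault handling is needed, so take
	 * the slow path at 2:; otherwise only the changed (dirty) bit is
	 * missing and it is fixed up right here in the handler */
	return (dsisr & 0x48000000UL) == 0;
}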
@@ -522,26 +539,12 @@ DataTLBError:
 	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
 	lwz	r10, 0(r11)	/* Get the pte */
 
-	andi.	r11, r10, _PAGE_RW	/* Is it writeable? */
-	beq	2f	/* Bail out if not */
-
-	/* Update 'changed', among others.
-	 */
-#ifdef CONFIG_SWAP
-	ori	r10, r10, _PAGE_DIRTY|_PAGE_HWWRITE
-	/* do not set the _PAGE_ACCESSED bit of a non-present page */
-	andi.	r11, r10, _PAGE_PRESENT
-	beq	4f
-	ori	r10, r10, _PAGE_ACCESSED
-4:
-#else
-	ori	r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
-#endif
-	mfspr	r11, SPRN_MD_TWC	/* Get pte address again */
+	ori	r10, r10, _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HWWRITE
 	stw	r10, 0(r11)	/* and update pte in table */
+	xori	r10, r10, _PAGE_RW	/* RW bit is inverted */
 
 	/* The Linux PTE won't go exactly into the MMU TLB.
-	 * Software indicator bits 21, 22 and 28 must be clear.
+	 * Software indicator bits 22 and 28 must be clear.
 	 * Software indicator bits 24, 25, 26, and 27 must be
 	 * set.  All other Linux PTE bits control the behavior
 	 * of the MMU.
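
So the remaining DataTLBError fast path boils down to the sketch below (with
a hypothetical fixup_dirty() wrapper; note _PAGE_HWWRITE no longer has an 8xx
definition after this patch, so it presumably resolves to the generic 0
fallback):

static inline unsigned long fixup_dirty(unsigned long *ptep)
{
	unsigned long pte = *ptep | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HWWRITE;

	*ptep = pte;		/* stw r10, 0(r11): update the page table */
	return pte ^ _PAGE_RW;	/* flip the inverted PP lsb before it reaches MD_RPN */
}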