author		Paul Mackerras <paulus@samba.org>	2006-06-14 20:45:18 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-06-14 20:45:18 -0400
commit		bf72aeba2ffef599d1d386425c9e46b82be657cd
tree		ead8e5111dbcfa22e156999d1bb8a96e50f06fef /arch/powerpc/mm/slb_low.S
parent		31925323b1b51bb65db729e029472a8b1f635b7d
powerpc: Use 64k pages without needing cache-inhibited large pages

Some POWER5+ machines can do 64k hardware pages for normal memory but
not for cache-inhibited pages. This patch lets us use 64k hardware
pages for most user processes on such machines (assuming the kernel
has been configured with CONFIG_PPC_64K_PAGES=y). User processes
start out using 64k pages and get switched to 4k pages if they use any
non-cacheable mappings.
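
As a rough sketch of that policy (not the kernel's actual code; every name below is invented for illustration), a per-process context can be modelled as starting out with 64k pages and being demoted, one-way, to 4k pages the first time a cache-inhibited mapping is created:

#include <stdbool.h>
#include <stdio.h>

enum page_size { PSIZE_4K, PSIZE_64K };

/* Illustrative stand-in for a per-process MMU context. */
struct mm_ctx {
	enum page_size psize;
};

/* Every process starts out using 64k hardware pages. */
static void mm_ctx_init(struct mm_ctx *mm)
{
	mm->psize = PSIZE_64K;
}

/*
 * Creating a non-cacheable mapping demotes the whole address space to
 * 4k pages, since these machines cannot do cache-inhibited 64k pages.
 * The demotion is one-way: the process never goes back to 64k pages.
 */
static void create_mapping(struct mm_ctx *mm, bool cache_inhibited)
{
	if (cache_inhibited && mm->psize == PSIZE_64K)
		mm->psize = PSIZE_4K;
}

int main(void)
{
	struct mm_ctx mm;

	mm_ctx_init(&mm);
	create_mapping(&mm, false);	/* ordinary mapping: still 64k */
	create_mapping(&mm, true);	/* non-cacheable: demoted to 4k */
	printf("process now uses %s pages\n",
	       mm.psize == PSIZE_64K ? "64k" : "4k");
	return 0;
}
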
With this, we use 64k pages for the vmalloc region and 4k pages for
the imalloc region. If anything creates a non-cacheable mapping in
the vmalloc region, the vmalloc region will get switched to 4k pages.
I don't know of any driver other than the DRM that would do this,
though, and these machines don't have AGP.

When a region gets switched from 64k pages to 4k pages, we do not have
to clear out all the 64k HPTEs from the hash table immediately. We
use the _PAGE_COMBO bit in the Linux PTE to indicate whether the page
was hashed in as a 64k page or a set of 4k pages. If hash_page is
trying to insert a 4k page for a Linux PTE and it sees that it has
already been inserted as a 64k page, it first invalidates the 64k HPTE
before inserting the 4k HPTE. The hash invalidation routines also use
the _PAGE_COMBO bit, to determine whether to look for a 64k HPTE or a
set of 4k HPTEs to remove. With those two changes, we can tolerate a
mix of 4k and 64k HPTEs in the hash table, and they will all get
removed when the address space is torn down.
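
The _PAGE_COMBO bookkeeping can be sketched in the same spirit. This is a simplified user-space model; the HPTE helpers (hpte_invalidate_64k() and friends) are hypothetical stand-ins rather than the kernel's real routines, and only the decision logic follows the description above:

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_COMBO	0x1	/* PTE is currently backed by a set of 4k HPTEs */

struct linux_pte {
	unsigned long flags;
	bool hashed;		/* some HPTE for this PTE exists in the hash table */
};

/* Hypothetical HPTE primitives; the real insert/invalidate paths do far more. */
static void hpte_invalidate_64k(struct linux_pte *p)    { p->hashed = false; }
static void hpte_invalidate_4k_set(struct linux_pte *p) { p->hashed = false; }
static void hpte_insert_64k(struct linux_pte *p)        { p->hashed = true; }
static void hpte_insert_4k(struct linux_pte *p)         { p->hashed = true; }

/* Hash a PTE in as a 64k page: clear _PAGE_COMBO so invalidation knows. */
static void hash_page_64k(struct linux_pte *p)
{
	hpte_insert_64k(p);
	p->flags &= ~_PAGE_COMBO;
}

/*
 * Hash a PTE in as a 4k page.  If it was previously hashed as a single
 * 64k page, that 64k HPTE is invalidated before the 4k HPTE goes in.
 */
static void hash_page_4k(struct linux_pte *p)
{
	if (p->hashed && !(p->flags & _PAGE_COMBO))
		hpte_invalidate_64k(p);
	hpte_insert_4k(p);
	p->flags |= _PAGE_COMBO;
}

/* Invalidation uses _PAGE_COMBO to decide which kind of HPTE(s) to remove. */
static void hash_invalidate(struct linux_pte *p)
{
	if (p->flags & _PAGE_COMBO)
		hpte_invalidate_4k_set(p);
	else
		hpte_invalidate_64k(p);
	p->hashed = false;
}

int main(void)
{
	struct linux_pte pte = { 0, false };

	hash_page_64k(&pte);	/* first faulted in as a 64k page */
	hash_page_4k(&pte);	/* demoted: 64k HPTE removed, 4k HPTE inserted */
	hash_invalidate(&pte);	/* teardown: _PAGE_COMBO says remove 4k HPTEs */
	printf("hashed = %d, combo = %lu\n", pte.hashed, pte.flags & _PAGE_COMBO);
	return 0;
}
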
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/slb_low.S')
-rw-r--r--	arch/powerpc/mm/slb_low.S	17
1 files changed, 12 insertions, 5 deletions
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index abfaabf667bf..8548dcf8ef8b 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -59,10 +59,19 @@ _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
 	b	slb_finish_load
 
-1:	/* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
 	 * will be patched by the kernel at boot
 	 */
-_GLOBAL(slb_miss_kernel_load_virtual)
+BEGIN_FTR_SECTION
+	/* check whether this is in vmalloc or ioremap space */
+	clrldi	r11,r10,48
+	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
+	bgt	5f
+	lhz	r11,PACAVMALLOCSLLP(r13)
+	b	slb_finish_load
+5:
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+_GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 	b	slb_finish_load
 
@@ -96,9 +105,7 @@ _GLOBAL(slb_miss_user_load_huge)
 1:
 #endif /* CONFIG_HUGETLB_PAGE */
 
-_GLOBAL(slb_miss_user_load_normal)
-	li	r11,0
-
+	lhz	r11,PACACONTEXTSLLP(r13)
 2:
 	ld	r9,PACACONTEXTID(r13)
 	rldimi	r10,r9,USER_ESID_BITS,0