author		Paul Mackerras <paulus@samba.org>	2007-10-11 06:37:10 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-10-12 00:05:17 -0400
commit		1189be6508d45183013ddb82b18f4934193de274
tree		58924481b4de56699e4a884dce8dc601e71cf7d1	/arch/powerpc/mm/slb_low.S
parent		287e5d6fcccfa38b953cebe307e1ddfd32363355
[POWERPC] Use 1TB segments
This makes the kernel use 1TB segments for all kernel mappings and for
user addresses of 1TB and above, on machines which support them
(currently POWER5+, POWER6 and PA6T).
We detect that the machine supports 1TB segments by looking at the
ibm,processor-segment-sizes property in the device tree.
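(For illustration only, not code from this patch: per the device-tree binding, ibm,processor-segment-sizes is assumed here to be an array of 32-bit cells, each the log2 of a supported segment size, so 0x1c means 256MB (2^28) and 0x28 means 1TB (2^40). A minimal stand-alone C sketch of such a check, with hypothetical sample cell values, might look like this.)

#include <stdint.h>
#include <stdio.h>

/* Hypothetical cells as they might appear in ibm,processor-segment-sizes:
 * 0x1c = 256MB (2^28), 0x28 = 1TB (2^40). */
static const uint32_t seg_sizes[] = { 0x1c, 0x28 };

/* Return 1 if any advertised segment-size shift is 40 bits (1TB). */
static int supports_1T_segments(const uint32_t *prop, int ncells)
{
	for (int i = 0; i < ncells; i++)
		if (prop[i] == 0x28)
			return 1;
	return 0;
}

int main(void)
{
	if (supports_1T_segments(seg_sizes, 2))
		printf("CPU advertises 1TB segments\n");
	else
		printf("256MB segments only\n");
	return 0;
}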
We don't currently use 1TB segments for user addresses < 1T, since
that would effectively prevent 32-bit processes from using huge pages
unless we also had a way to revert to using 256MB segments. That
would be possible but would involve extra complications (such as
keeping track of which segment size was used when HPTEs were inserted)
and is not addressed here.
Parts of this patch were originally written by Ben Herrenschmidt.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/slb_low.S')
-rw-r--r--	arch/powerpc/mm/slb_low.S	37
1 file changed, 33 insertions, 4 deletions
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index cd1a93d4948c..1328a81a84aa 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -57,7 +57,10 @@ _GLOBAL(slb_allocate_realmode)
 	 */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
+BEGIN_FTR_SECTION
 	b	slb_finish_load
+END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+	b	slb_finish_load_1T
 
 1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
 	 * will be patched by the kernel at boot
@@ -68,13 +71,16 @@ BEGIN_FTR_SECTION
 	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
 	bgt	5f
 	lhz	r11,PACAVMALLOCSLLP(r13)
-	b	slb_finish_load
+	b	6f
 5:
 END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 _GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
+6:
+BEGIN_FTR_SECTION
 	b	slb_finish_load
-
+END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+	b	slb_finish_load_1T
 
 0:	/* user address: proto-VSID = context << 15 | ESID. First check
 	 * if the address is within the boundaries of the user region
@@ -122,7 +128,13 @@ _GLOBAL(slb_miss_kernel_load_io)
 #endif /* CONFIG_PPC_MM_SLICES */
 
 	ld	r9,PACACONTEXTID(r13)
+BEGIN_FTR_SECTION
+	cmpldi	r10,0x1000
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	rldimi	r10,r9,USER_ESID_BITS,0
+BEGIN_FTR_SECTION
+	bge	slb_finish_load_1T
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
@@ -188,7 +200,7 @@ _GLOBAL(slb_allocate_user)
  * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
-	ASM_VSID_SCRAMBLE(r10,r9)
+	ASM_VSID_SCRAMBLE(r10,r9,256M)
 	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
 
 	/* r3 = EA, r11 = VSID data */
@@ -213,7 +225,7 @@ BEGIN_FW_FTR_SECTION
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif /* CONFIG_PPC_ISERIES */
 
-	ld	r10,PACASTABRR(r13)
+7:	ld	r10,PACASTABRR(r13)
 	addi	r10,r10,1
 	/* use a cpu feature mask if we ever change our slb size */
 	cmpldi	r10,SLB_NUM_ENTRIES
@@ -259,3 +271,20 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 	crclr	4*cr0+eq		/* set result to "success" */
 	blr
 
+/*
+ * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
+ * We assume legacy iSeries will never have 1T segments.
+ *
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ */
+slb_finish_load_1T:
+	srdi	r10,r10,40-28		/* get 1T ESID */
+	ASM_VSID_SCRAMBLE(r10,r9,1T)
+	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
+	li	r10,MMU_SEGSIZE_1T
+	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
+
+	/* r3 = EA, r11 = VSID data */
+	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
+	b	7b
+
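(As a rough model of the new user-address logic above, illustrative only and not kernel code: the ESID in r10 is the effective address shifted right by 28 bits, so the 1TB boundary corresponds to ESID 0x1000, which is what the added cmpldi r10,0x1000 / bge slb_finish_load_1T pair tests; slb_finish_load_1T's srdi r10,r10,40-28 then reduces that 256MB ESID to a 1T ESID. The constants below mirror SID_SHIFT and SID_SHIFT_1T, and the example address is arbitrary.)

#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT	28	/* 256MB segments: 2^28 */
#define SID_SHIFT_1T	40	/* 1TB segments:   2^40 */

/* Model of the branch added to slb_allocate_realmode: the 256MB ESID
 * (ea >> 28) reaches 0x1000 exactly at the 1TB boundary, so user
 * addresses of 1TB and above are steered to the 1T path. */
static int use_1T_segment(uint64_t ea, int cpu_has_1T)
{
	uint64_t esid_256M = ea >> SID_SHIFT;
	return cpu_has_1T && esid_256M >= 0x1000;
}

int main(void)
{
	uint64_t ea = 0x10000000000ULL;		/* exactly 1TB */
	uint64_t esid_256M = ea >> SID_SHIFT;	/* 0x1000 */
	uint64_t esid_1T = esid_256M >> (SID_SHIFT_1T - SID_SHIFT);	/* srdi r10,r10,40-28 */

	printf("256M ESID = 0x%llx, 1T ESID = 0x%llx, 1T path: %d\n",
	       (unsigned long long)esid_256M, (unsigned long long)esid_1T,
	       use_1T_segment(ea, 1));
	return 0;
}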