path: root/arch/powerpc/mm/slb_low.S
author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2005-11-06 19:06:55 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>              2005-11-06 19:56:47 -0500
commit    3c726f8dee6f55e96475574e9f645327e461884c (patch)
tree      f67c381e8f57959aa4a94bda4c68e24253cd8171 /arch/powerpc/mm/slb_low.S
parent    f912696ab330bf539231d1f8032320f2a08b850f (diff)
[PATCH] ppc64: support 64k pages
Adds a new CONFIG_PPC_64K_PAGES which, when enabled, changes the kernel base page size to 64K. The resulting kernel still boots on any hardware. On current machines with 4K pages support only, the kernel will maintain 16 "subpages" for each 64K page transparently.

Note that while real 64K capable HW has been tested, the current patch will not enable it yet as such hardware is not released yet, and I'm still verifying with the firmware architects the proper way to get the information from the newer hypervisors.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
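As a rough illustration of the subpage bookkeeping the message describes (a minimal conceptual sketch in C, not code from this patch; the constants and helper name below are made up for the example): with a 64K base page on hardware that only implements 4K pages, each Linux page covers 64K / 4K = 16 hardware subpages.

/* Conceptual sketch only -- illustrative constants/names, not the
 * identifiers used by the ppc64 patch. */
#include <stdio.h>

#define BASE_PAGE_SHIFT   16   /* 64K kernel base page */
#define HW_PAGE_SHIFT     12   /* 4K hardware page */
#define SUBPAGES_PER_PAGE (1u << (BASE_PAGE_SHIFT - HW_PAGE_SHIFT))  /* = 16 */

/* Which of the 16 hardware subpages does an effective address fall into? */
static unsigned int subpage_index(unsigned long ea)
{
	return (unsigned int)((ea >> HW_PAGE_SHIFT) & (SUBPAGES_PER_PAGE - 1));
}

int main(void)
{
	unsigned long ea = 0xdead6000UL;

	printf("subpages per 64K page: %u\n", SUBPAGES_PER_PAGE);
	printf("EA 0x%lx falls in subpage %u of its 64K page\n",
	       ea, subpage_index(ea));
	return 0;
}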
Diffstat (limited to 'arch/powerpc/mm/slb_low.S')
-rw-r--r--  arch/powerpc/mm/slb_low.S  220
1 file changed, 152 insertions(+), 68 deletions(-)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index a3a03da503bc..3e18241b6f35 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -18,61 +18,28 @@
 
 #include <linux/config.h>
 #include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
 
-/* void slb_allocate(unsigned long ea);
+/* void slb_allocate_realmode(unsigned long ea);
  *
  * Create an SLB entry for the given EA (user or kernel).
  * r3 = faulting address, r13 = PACA
  * r9, r10, r11 are clobbered by this function
  * No other registers are examined or changed.
  */
-_GLOBAL(slb_allocate)
-	/*
-	 * First find a slot, round robin. Previously we tried to find
-	 * a free slot first but that took too long. Unfortunately we
-	 * dont have any LRU information to help us choose a slot.
-	 */
-#ifdef CONFIG_PPC_ISERIES
-	/*
-	 * On iSeries, the "bolted" stack segment can be cast out on
-	 * shared processor switch so we need to check for a miss on
-	 * it and restore it to the right slot.
-	 */
-	ld	r9,PACAKSAVE(r13)
-	clrrdi	r9,r9,28
-	clrrdi	r11,r3,28
-	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
-	cmpld	r9,r11
-	beq	3f
-#endif /* CONFIG_PPC_ISERIES */
-
-	ld	r10,PACASTABRR(r13)
-	addi	r10,r10,1
-	/* use a cpu feature mask if we ever change our slb size */
-	cmpldi	r10,SLB_NUM_ENTRIES
-
-	blt+	4f
-	li	r10,SLB_NUM_BOLTED
-
-4:
-	std	r10,PACASTABRR(r13)
-3:
-	/* r3 = faulting address, r10 = entry */
+_GLOBAL(slb_allocate_realmode)
+	/* r3 = faulting address */
 
 	srdi	r9,r3,60		/* get region */
-	srdi	r3,r3,28		/* get esid */
+	srdi	r10,r3,28		/* get esid */
 	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */
 
-	rldimi	r10,r3,28,0		/* r10= ESID<<28 | entry */
-	oris	r10,r10,SLB_ESID_V@h	/* r10 |= SLB_ESID_V */
-
-	/* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */
-
+	/* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
 	blt	cr7,0f			/* user or kernel? */
 
 	/* kernel address: proto-VSID = ESID */
@@ -81,43 +48,161 @@ _GLOBAL(slb_allocate)
 	 * top segment.  That's ok, the scramble below will translate
 	 * it to VSID 0, which is reserved as a bad VSID - one which
 	 * will never have any pages in it.  */
-	li	r11,SLB_VSID_KERNEL
-BEGIN_FTR_SECTION
-	bne	cr7,9f
-	li	r11,(SLB_VSID_KERNEL|SLB_VSID_L)
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
-	b	9f
 
-0:	/* user address: proto-VSID = context<<15 | ESID */
-	srdi.	r9,r3,USER_ESID_BITS
+	/* Check if hitting the linear mapping of the vmalloc/ioremap
+	 * kernel space
+	 */
+	bne	cr7,1f
+
+	/* Linear mapping encoding bits, the "li" instruction below will
+	 * be patched by the kernel at boot
+	 */
+_GLOBAL(slb_miss_kernel_load_linear)
+	li	r11,0
+	b	slb_finish_load
+
+1:	/* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+	 * will be patched by the kernel at boot
+	 */
+_GLOBAL(slb_miss_kernel_load_virtual)
+	li	r11,0
+	b	slb_finish_load
+
+
+0:	/* user address: proto-VSID = context << 15 | ESID. First check
+	 * if the address is within the boundaries of the user region
+	 */
+	srdi.	r9,r10,USER_ESID_BITS
 	bne-	8f			/* invalid ea bits set */
 
+	/* Figure out if the segment contains huge pages */
 #ifdef CONFIG_HUGETLB_PAGE
 BEGIN_FTR_SECTION
+	b	1f
+END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
 	lhz	r9,PACAHIGHHTLBAREAS(r13)
-	srdi	r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT)
+	srdi	r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
 	srd	r9,r9,r11
 	lhz	r11,PACALOWHTLBAREAS(r13)
-	srd	r11,r11,r3
-	or	r9,r9,r11
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
+	srd	r11,r11,r10
+	or.	r9,r9,r11
+	beq	1f
+_GLOBAL(slb_miss_user_load_huge)
+	li	r11,0
+	b	2f
+1:
 #endif /* CONFIG_HUGETLB_PAGE */
 
-	li	r11,SLB_VSID_USER
+_GLOBAL(slb_miss_user_load_normal)
+	li	r11,0
 
-#ifdef CONFIG_HUGETLB_PAGE
-BEGIN_FTR_SECTION
-	rldimi	r11,r9,8,55		/* shift masked bit into SLB_VSID_L */
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
-#endif /* CONFIG_HUGETLB_PAGE */
+2:
+	ld	r9,PACACONTEXTID(r13)
+	rldimi	r10,r9,USER_ESID_BITS,0
+	b	slb_finish_load
+
+8:	/* invalid EA */
+	li	r10,0			/* BAD_VSID */
+	li	r11,SLB_VSID_USER	/* flags don't much matter */
+	b	slb_finish_load
+
+#ifdef __DISABLED__
+
+/* void slb_allocate_user(unsigned long ea);
+ *
+ * Create an SLB entry for the given EA (user or kernel).
+ * r3 = faulting address, r13 = PACA
+ * r9, r10, r11 are clobbered by this function
+ * No other registers are examined or changed.
+ *
+ * It is called with translation enabled in order to be able to walk the
+ * page tables. This is not currently used.
+ */
+_GLOBAL(slb_allocate_user)
+	/* r3 = faulting address */
+	srdi	r10,r3,28		/* get esid */
+
+	crset	4*cr7+lt		/* set "user" flag for later */
+
+	/* check if we fit in the range covered by the pagetables*/
+	srdi.	r9,r3,PGTABLE_EADDR_SIZE
+	crnot	4*cr0+eq,4*cr0+eq
+	beqlr
 
+	/* now we need to get to the page tables in order to get the page
+	 * size encoding from the PMD. In the future, we'll be able to deal
+	 * with 1T segments too by getting the encoding from the PGD instead
+	 */
+	ld	r9,PACAPGDIR(r13)
+	cmpldi	cr0,r9,0
+	beqlr
+	rlwinm	r11,r10,8,25,28
+	ldx	r9,r9,r11		/* get pgd_t */
+	cmpldi	cr0,r9,0
+	beqlr
+	rlwinm	r11,r10,3,17,28
+	ldx	r9,r9,r11		/* get pmd_t */
+	cmpldi	cr0,r9,0
+	beqlr
+
+	/* build vsid flags */
+	andi.	r11,r9,SLB_VSID_LLP
+	ori	r11,r11,SLB_VSID_USER
+
+	/* get context to calculate proto-VSID */
 	ld	r9,PACACONTEXTID(r13)
-	rldimi	r3,r9,USER_ESID_BITS,0
+	rldimi	r10,r9,USER_ESID_BITS,0
+
+	/* fall through slb_finish_load */
+
+#endif /* __DISABLED__ */
 
-9:	/* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
-	ASM_VSID_SCRAMBLE(r3,r9)
 
-	rldimi	r11,r3,SLB_VSID_SHIFT,16	/* combine VSID and flags */
+/*
+ * Finish loading of an SLB entry and return
+ *
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ */
+slb_finish_load:
+	ASM_VSID_SCRAMBLE(r10,r9)
+	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
+
+	/* r3 = EA, r11 = VSID data */
+	/*
+	 * Find a slot, round robin. Previously we tried to find a
+	 * free slot first but that took too long. Unfortunately we
+	 * dont have any LRU information to help us choose a slot.
+	 */
+#ifdef CONFIG_PPC_ISERIES
+	/*
+	 * On iSeries, the "bolted" stack segment can be cast out on
+	 * shared processor switch so we need to check for a miss on
+	 * it and restore it to the right slot.
+	 */
+	ld	r9,PACAKSAVE(r13)
+	clrrdi	r9,r9,28
+	clrrdi	r3,r3,28
+	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
+	cmpld	r9,r3
+	beq	3f
+#endif /* CONFIG_PPC_ISERIES */
+
+	ld	r10,PACASTABRR(r13)
+	addi	r10,r10,1
+	/* use a cpu feature mask if we ever change our slb size */
+	cmpldi	r10,SLB_NUM_ENTRIES
+
+	blt+	4f
+	li	r10,SLB_NUM_BOLTED
+
+4:
+	std	r10,PACASTABRR(r13)
+
+3:
+	rldimi	r3,r10,0,36		/* r3= EA[0:35] | entry */
+	oris	r10,r3,SLB_ESID_V@h	/* r3 |= SLB_ESID_V */
+
+	/* r3 = ESID data, r11 = VSID data */
 
 	/*
 	 * No need for an isync before or after this slbmte. The exception
@@ -125,7 +210,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
 	 */
 	slbmte	r11,r10
 
-	bgelr	cr7			/* we're done for kernel addresses */
+	/* we're done for kernel addresses */
+	crclr	4*cr0+eq		/* set result to "success" */
+	bgelr	cr7
 
 	/* Update the slb cache */
 	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
@@ -143,9 +230,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
 	li	r3,SLB_CACHE_ENTRIES+1
 2:
 	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
+	crclr	4*cr0+eq		/* set result to "success" */
 	blr
 
-8:	/* invalid EA */
-	li	r3,0			/* BAD_VSID */
-	li	r11,SLB_VSID_USER	/* flags don't much matter */
-	b	9b
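For readers tracing the assembly above, here is a rough C rendering of the round-robin slot selection that slb_finish_load performs on PACASTABRR (an illustrative sketch only, not kernel code; the SLB sizes are assumptions for the example and the struct/function names are made up).

/* Illustrative sketch of the round-robin SLB slot choice done around
 * PACASTABRR in slb_finish_load above; not actual kernel code. */

#define SLB_NUM_ENTRIES 64  /* assumed SLB size for this sketch */
#define SLB_NUM_BOLTED  3   /* assumed number of bolted (never-evicted) slots */

struct paca_sketch {
	unsigned long stab_rr;  /* stands in for the PACASTABRR field */
};

/* Pick the next SLB slot to overwrite, skipping the bolted entries. */
unsigned long next_slb_slot(struct paca_sketch *paca)
{
	unsigned long entry = paca->stab_rr + 1;  /* "addi r10,r10,1" */

	if (entry >= SLB_NUM_ENTRIES)             /* "cmpldi ... blt+ 4f" */
		entry = SLB_NUM_BOLTED;           /* wrap past the bolted slots */

	paca->stab_rr = entry;                    /* "std r10,PACASTABRR(r13)" */
	return entry;
}

The iSeries special case in the assembly (a miss on the bolted stack segment) bypasses this and reuses slot SLB_NUM_BOLTED-1 directly; the sketch only covers the common round-robin path.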