author		Paul Mackerras <paulus@samba.org>	2007-10-11 06:37:10 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-10-12 00:05:17 -0400
commit		1189be6508d45183013ddb82b18f4934193de274 (patch)
tree		58924481b4de56699e4a884dce8dc601e71cf7d1 /arch/powerpc/mm/slb.c
parent		287e5d6fcccfa38b953cebe307e1ddfd32363355 (diff)
[POWERPC] Use 1TB segments
This makes the kernel use 1TB segments for all kernel mappings and for
user addresses of 1TB and above, on machines which support them
(currently POWER5+, POWER6 and PA6T).

We detect that the machine supports 1TB segments by looking at the
ibm,processor-segment-sizes property in the device tree.

We don't currently use 1TB segments for user addresses < 1T, since that
would effectively prevent 32-bit processes from using huge pages unless
we also had a way to revert to using 256MB segments.  That would be
possible but would involve extra complications (such as keeping track of
which segment size was used when HPTEs were inserted) and is not
addressed here.

Parts of this patch were originally written by Ben Herrenschmidt.

Signed-off-by: Paul Mackerras <paulus@samba.org>
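[Annotation] The capability check described above is not part of slb.c itself; it
happens during early hash-MMU setup.  As a rough illustration only (the callback
name here is made up, and the flattened-device-tree helper signatures are assumed
from kernels of this era), a scan for 1TB segment support could look like this:

/* Illustrative sketch, not the code added by this patch: walk the
 * flattened device tree and report whether any cpu node advertises a
 * 2^40-byte (1TB) segment size in ibm,processor-segment-sizes.
 */
static int __init scan_for_1T_segments(unsigned long node, const char *uname,
				       int depth, void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const u32 *prop;
	unsigned long size = 0;

	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;		/* only cpu nodes carry the property */

	prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
	if (prop == NULL)
		return 0;		/* property absent: 256MB segments only */

	for (; size >= 4; size -= 4, ++prop)
		if (*prop == 40)	/* sizes are encoded as log2 of bytes */
			return 1;	/* 1TB segments supported */
	return 0;
}

A caller would hand this to of_scan_flat_dt() during boot and, on a non-zero
return, set the CPU/MMU feature bit that the SLB code then relies on.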
Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--	arch/powerpc/mm/slb.c | 67
1 file changed, 41 insertions(+), 26 deletions(-)
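[Annotation] The first hunk below changes mk_esid_data() and mk_vsid_data() to
pick between the 256MB and 1TB segment geometry.  The constants they select
between live in the hash-MMU headers, not in this file; roughly (values assumed
from the 64-bit hash MMU layout, not shown in this diff):

/* Assumed geometry, for reading the hunks below -- not part of this patch.
 * A 256MB segment covers 2^28 bytes and a 1TB segment 2^40 bytes, so the
 * ESID masks keep correspondingly fewer address bits, and the segment-size
 * (B) field sits in the top bits of the SLB VSID doubleword.
 */
#define SID_SHIFT		28			/* 256MB segment */
#define SID_SHIFT_1T		40			/* 1TB segment */
#define ESID_MASK		0xfffffffff0000000UL	/* ~((1UL << 28) - 1) */
#define ESID_MASK_1T		0xffffff0000000000UL	/* ~((1UL << 40) - 1) */
#define SLB_VSID_SSIZE_SHIFT	62			/* B field of the VSID word */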
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 4bee1cfa9dea..6c164cec9d2c 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -43,17 +43,26 @@ static void slb_allocate(unsigned long ea)
 	slb_allocate_realmode(ea);
 }
 
-static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+					 unsigned long slot)
 {
-	return (ea & ESID_MASK) | SLB_ESID_V | slot;
+	unsigned long mask;
+
+	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
+	return (ea & mask) | SLB_ESID_V | slot;
 }
 
-static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
+#define slb_vsid_shift(ssize)	\
+	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
+
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+					 unsigned long flags)
 {
-	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
+	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
+		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
 }
 
-static inline void slb_shadow_update(unsigned long ea,
+static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
 				     unsigned long entry)
 {
@@ -63,8 +72,8 @@ static inline void slb_shadow_update(unsigned long ea,
 	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
@@ -72,7 +81,8 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+					unsigned long flags,
 					unsigned long entry)
 {
 	/*
@@ -80,11 +90,11 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(ea, flags, entry);
+	slb_shadow_update(ea, ssize, flags, entry);
 
 	asm volatile("slbmte %0,%1" :
-		     : "r" (mk_vsid_data(ea, flags)),
-		       "r" (mk_esid_data(ea, entry))
+		     : "r" (mk_vsid_data(ea, ssize, flags)),
+		       "r" (mk_esid_data(ea, ssize, entry))
 		     : "memory" );
 }
 
@@ -93,7 +103,7 @@ void slb_flush_and_rebolt(void)
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
-	unsigned long ksp_esid_data;
+	unsigned long ksp_esid_data, ksp_vsid_data;
 
 	WARN_ON(!irqs_disabled());
 
@@ -102,13 +112,15 @@ void slb_flush_and_rebolt(void)
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
+	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
+		ksp_vsid_data = 0;
 		slb_shadow_clear(2);
 	} else {
 		/* Update stack entry; others don't change */
-		slb_shadow_update(get_paca()->kstack, lflags, 2);
+		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -120,9 +132,9 @@ void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte %2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
-		        "r"(mk_esid_data(VMALLOC_START, 1)),
-		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
+		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
+		        "r"(ksp_vsid_data),
 		        "r"(ksp_esid_data)
 		     : "memory");
 }
@@ -132,7 +144,7 @@ void slb_vmalloc_update(void)
 	unsigned long vflags;
 
 	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 	slb_flush_and_rebolt();
 }
 
@@ -140,7 +152,7 @@ void slb_vmalloc_update(void)
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
 	unsigned long offset = get_paca()->slb_cache_ptr;
-	unsigned long esid_data = 0;
+	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
@@ -149,9 +161,12 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		int i;
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {
-			esid_data = ((unsigned long)get_paca()->slb_cache[i]
-				<< SID_SHIFT) | SLBIE_C;
-			asm volatile("slbie %0" : : "r" (esid_data));
+			slbie_data = (unsigned long)get_paca()->slb_cache[i]
+				<< SID_SHIFT; /* EA */
+			slbie_data |= user_segment_size(slbie_data)
+				<< SLBIE_SSIZE_SHIFT;
+			slbie_data |= SLBIE_C; /* C set for user addresses */
+			asm volatile("slbie %0" : : "r" (slbie_data));
 		}
 		asm volatile("isync" : : : "memory");
 	} else {
@@ -160,7 +175,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	/* Workaround POWER5 < DD2.1 issue */
 	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
-		asm volatile("slbie %0" : : "r" (esid_data));
+		asm volatile("slbie %0" : : "r" (slbie_data));
 
 	get_paca()->slb_cache_ptr = 0;
 	get_paca()->context = mm->context;
@@ -243,9 +258,9 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
+	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
 
-	create_shadowed_slbe(VMALLOC_START, vflags, 1);
+	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment. By the time it goes
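[Annotation] The slbie loop in switch_slb() above relies on a user_segment_size()
helper that lives outside this file.  A minimal sketch of the idea, assuming (as
the commit message states) that user addresses of 1TB and above use 1TB segments
on machines that support them:

/* Illustrative sketch only, not the helper added by the full patch
 * (a real version would also check that the cpu supports 1TB segments).
 */
static inline int user_segment_size(unsigned long addr)
{
	if (addr >= (1UL << 40))	/* at or above the 1TB boundary */
		return MMU_SEGSIZE_1T;
	return MMU_SEGSIZE_256M;
}

The returned segment size is shifted into the slbie operand's SSIZE field so
that the invalidation matches the size of the segment being dropped.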