about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/stab.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/mm/stab.c')
-rw-r--r--  arch/powerpc/mm/stab.c  16
1 files changed, 8 insertions, 8 deletions
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 51e7951414e5..82e4951826bc 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
40 unsigned long entry, group, old_esid, castout_entry, i; 40 unsigned long entry, group, old_esid, castout_entry, i;
41 unsigned int global_entry; 41 unsigned int global_entry;
42 struct stab_entry *ste, *castout_ste; 42 struct stab_entry *ste, *castout_ste;
43 unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE; 43 unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
44 44
45 vsid_data = vsid << STE_VSID_SHIFT; 45 vsid_data = vsid << STE_VSID_SHIFT;
46 esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V; 46 esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
83 } 83 }
84 84
85 /* Dont cast out the first kernel segment */ 85 /* Dont cast out the first kernel segment */
86 if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE) 86 if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
87 break; 87 break;
88 88
89 castout_entry = (castout_entry + 1) & 0xf; 89 castout_entry = (castout_entry + 1) & 0xf;
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
122 unsigned long offset; 122 unsigned long offset;
123 123
124 /* Kernel or user address? */ 124 /* Kernel or user address? */
125 if (ea >= KERNELBASE) { 125 if (is_kernel_addr(ea)) {
126 vsid = get_kernel_vsid(ea); 126 vsid = get_kernel_vsid(ea);
127 } else { 127 } else {
128 if ((ea >= TASK_SIZE_USER64) || (! mm)) 128 if ((ea >= TASK_SIZE_USER64) || (! mm))
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
133 133
134 stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid); 134 stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
135 135
136 if (ea < KERNELBASE) { 136 if (!is_kernel_addr(ea)) {
137 offset = __get_cpu_var(stab_cache_ptr); 137 offset = __get_cpu_var(stab_cache_ptr);
138 if (offset < NR_STAB_CACHE_ENTRIES) 138 if (offset < NR_STAB_CACHE_ENTRIES)
139 __get_cpu_var(stab_cache[offset++]) = stab_entry; 139 __get_cpu_var(stab_cache[offset++]) = stab_entry;
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
190 entry++, ste++) { 190 entry++, ste++) {
191 unsigned long ea; 191 unsigned long ea;
192 ea = ste->esid_data & ESID_MASK; 192 ea = ste->esid_data & ESID_MASK;
193 if (ea < KERNELBASE) { 193 if (!is_kernel_addr(ea)) {
194 ste->esid_data = 0; 194 ste->esid_data = 0;
195 } 195 }
196 } 196 }
@@ -251,7 +251,7 @@ void stabs_alloc(void)
251 panic("Unable to allocate segment table for CPU %d.\n", 251 panic("Unable to allocate segment table for CPU %d.\n",
252 cpu); 252 cpu);
253 253
254 newstab += KERNELBASE; 254 newstab = (unsigned long)__va(newstab);
255 255
256 memset((void *)newstab, 0, HW_PAGE_SIZE); 256 memset((void *)newstab, 0, HW_PAGE_SIZE);
257 257
@@ -270,11 +270,11 @@ void stabs_alloc(void)
270 */ 270 */
271void stab_initialize(unsigned long stab) 271void stab_initialize(unsigned long stab)
272{ 272{
273 unsigned long vsid = get_kernel_vsid(KERNELBASE); 273 unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
274 unsigned long stabreal; 274 unsigned long stabreal;
275 275
276 asm volatile("isync; slbia; isync":::"memory"); 276 asm volatile("isync; slbia; isync":::"memory");
277 make_ste(stab, GET_ESID(KERNELBASE), vsid); 277 make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
278 278
279 /* Order update */ 279 /* Order update */
280 asm volatile("sync":::"memory"); 280 asm volatile("sync":::"memory");