Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--  arch/powerpc/mm/slb.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 60e852f2f8e5..ffc8ed4de62d 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -75,7 +75,7 @@ static void slb_flush_and_rebolt(void)
 	vflags = SLB_VSID_KERNEL | virtual_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -87,8 +87,8 @@ static void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte	%2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
-		        "r"(mk_esid_data(VMALLOCBASE, 1)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
+		        "r"(mk_esid_data(VMALLOC_START, 1)),
 		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
 		        "r"(ksp_esid_data)
 		     : "memory");
@@ -134,14 +134,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	else
 		unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-	if (pc >= KERNELBASE)
+	if (is_kernel_addr(pc))
 		return;
 	slb_allocate(pc);
 
 	if (GET_ESID(pc) == GET_ESID(stack))
 		return;
 
-	if (stack >= KERNELBASE)
+	if (is_kernel_addr(stack))
 		return;
 	slb_allocate(stack);
 
@@ -149,7 +149,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
 		return;
 
-	if (unmapped_base >= KERNELBASE)
+	if (is_kernel_addr(unmapped_base))
 		return;
 	slb_allocate(unmapped_base);
 }
@@ -213,10 +213,10 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte	%0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, lflags, 0);
+	create_slbe(PAGE_OFFSET, lflags, 0);
 
 	/* VMALLOC space has 4K pages always for now */
-	create_slbe(VMALLOCBASE, vflags, 1);
+	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment. By the time it goes
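
For readers skimming the patch: every hunk swaps an open-coded test or
constant based on KERNELBASE for PAGE_OFFSET, VMALLOC_START, or the
is_kernel_addr() helper, drawing a line between "base of the kernel
linear mapping" and "address the kernel was linked at". Below is a
minimal sketch of the helper as the hunks above appear to use it; it is
an assumption for illustration, not quoted from this patch. The real
definition lives in the powerpc page headers, and the PAGE_OFFSET value
shown is the conventional ppc64 linear-mapping base.

/* Sketch only: assumed shape of is_kernel_addr(), matching how the
 * switch_slb() hunks call it.  Both the constant and the signature
 * are assumptions made for this example.
 */
#define PAGE_OFFSET	0xc000000000000000UL

static inline int is_kernel_addr(unsigned long addr)
{
	/* Anything at or above the linear-mapping base is a kernel
	 * address; user addresses sit below it. */
	return addr >= PAGE_OFFSET;
}

Read this way, the switch_slb() hunks behave identically on
configurations where KERNELBASE equals PAGE_OFFSET, but stay correct if
the two ever diverge, which is the distinction the renames are drawing.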