 arch/ppc64/mm/slb.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
index 6a20773f695d..244150a0bc18 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/ppc64/mm/slb.c
@@ -33,8 +33,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void create_slbe(unsigned long ea, unsigned long vsid,
-			       unsigned long flags, unsigned long entry)
+static inline void create_slbe(unsigned long ea, unsigned long flags,
+			       unsigned long entry)
 {
 	asm volatile("slbmte %0,%1" :
 		     : "r" (mk_vsid_data(ea, flags)),
@@ -145,9 +145,8 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE), flags, 0);
-	create_slbe(VMALLOCBASE, get_kernel_vsid(KERNELBASE),
-		    SLB_VSID_KERNEL, 1);
+	create_slbe(KERNELBASE, flags, 0);
+	create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment. By the time it goes
 	 * elsewhere, we'll call _switch() which will bolt in the new
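
Why the vsid argument could go away: create_slbe() already derives the VSID from the effective address through mk_vsid_data(), so the value callers passed (get_kernel_vsid(KERNELBASE), even for the VMALLOCBASE entry) was never consumed. The sketch below is illustrative only, not the kernel source; mk_esid_data() and the "memory" clobber are assumptions about the unchanged remainder of create_slbe(), which the hunks above do not show.

/* Illustrative sketch, not the kernel source: the VSID half of the SLB
 * entry is rebuilt from the effective address inside the helper, so the
 * call sites need only pass the address, the protection flags and the
 * SLB slot number.
 */
static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	/* VSID is computed from 'ea' itself. */
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

static inline void create_slbe(unsigned long ea, unsigned long flags,
			       unsigned long entry)
{
	/* RS = VSID data, RB = ESID data; mk_esid_data() is assumed here. */
	asm volatile("slbmte %0,%1"
		     : /* no outputs */
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory");
}

/* Call sites after the patch, as in slb_initialize():
 *	create_slbe(KERNELBASE, flags, 0);
 *	create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
 */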