about | summary | refs | log | tree | commit | diff | stats
path: root/arch/powerpc/mm/stab.c
diff options
context:
space:
mode:
author    Michael Ellerman <michael@ellerman.id.au>  2005-12-05 11:24:33 -0500
committer Paul Mackerras <paulus@samba.org>          2006-01-08 22:51:54 -0500
commit    b5666f70395016a55cc9d57826508b8a346398d0 (patch)
tree      39d74718c2bc3f2fcba6456fdc39a6a0de3d78bd /arch/powerpc/mm/stab.c
parent    51fae6de24da57bc6cdaa1b253595c3513ecbf2d (diff)
[PATCH] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET
This patch separates usage of KERNELBASE and PAGE_OFFSET. I haven't looked at any of the PPC32 code, if we ever want to support Kdump on PPC we'll have to do another audit, ditto for iSeries. This patch makes PAGE_OFFSET the constant, it'll always be 0xC * 1 gazillion for 64-bit. To get a physical address from a virtual one you subtract PAGE_OFFSET, _not_ KERNELBASE. KERNELBASE is the virtual address of the start of the kernel, it's often the same as PAGE_OFFSET, but _might not be_. If you want to know something's offset from the start of the kernel you should subtract KERNELBASE. Signed-off-by: Michael Ellerman <michael@ellerman.id.au> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/stab.c')
-rw-r--r--  arch/powerpc/mm/stab.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index a18dab0d6b12..82e4951826bc 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 		}
 
 		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 			break;
 
 		castout_entry = (castout_entry + 1) & 0xf;
@@ -251,7 +251,7 @@ void stabs_alloc(void)
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
 
-		newstab += KERNELBASE;
+		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
 
@@ -270,11 +270,11 @@ void stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 	unsigned long stabreal;
 
 	asm volatile("isync; slbia; isync":::"memory");
-	make_ste(stab, GET_ESID(KERNELBASE), vsid);
+	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 
 	/* Order update */
 	asm volatile("sync":::"memory");