author    Michael Ellerman <michael@ellerman.id.au>  2005-12-05 11:24:33 -0500
committer Paul Mackerras <paulus@samba.org>          2006-01-08 22:51:54 -0500
commit    b5666f70395016a55cc9d57826508b8a346398d0
tree      39d74718c2bc3f2fcba6456fdc39a6a0de3d78bd
parent    51fae6de24da57bc6cdaa1b253595c3513ecbf2d
[PATCH] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET
This patch separates usage of KERNELBASE and PAGE_OFFSET. I haven't looked
at any of the PPC32 code; if we ever want to support Kdump on PPC we'll
have to do another audit, ditto for iSeries.

This patch makes PAGE_OFFSET the constant; it'll always be 0xC * 1
gazillion for 64-bit.

To get a physical address from a virtual one you subtract PAGE_OFFSET,
_not_ KERNELBASE.

KERNELBASE is the virtual address of the start of the kernel; it's often
the same as PAGE_OFFSET, but it _might not be_. If you want to know
something's offset from the start of the kernel you should subtract
KERNELBASE.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
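To make the rule concrete, here is a minimal sketch of the two conversions
the message describes, assuming the 64-bit PAGE_OFFSET value; the helper
names below are illustrative only, not kernel API (in-tree code uses the
__va()/__pa() macros for the linear-mapping conversion, as the diff shows):

	#include <stdint.h>

	/* 64-bit powerpc: fixed base of the kernel linear mapping */
	#define PAGE_OFFSET	0xc000000000000000UL

	/*
	 * Virtual address where the kernel image starts. Usually equal to
	 * PAGE_OFFSET, but not guaranteed to be (e.g. for a kdump kernel);
	 * the zero offset here is a placeholder for that possible gap.
	 */
	#define KERNELBASE	(PAGE_OFFSET + 0x0UL)

	/* virtual -> physical: subtract PAGE_OFFSET, _not_ KERNELBASE */
	static inline uint64_t virt_to_phys_sketch(uint64_t vaddr)
	{
		return vaddr - PAGE_OFFSET;
	}

	/* offset of an address from the start of the kernel image */
	static inline uint64_t offset_into_kernel_sketch(uint64_t vaddr)
	{
		return vaddr - KERNELBASE;
	}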
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c |  6
-rw-r--r--  arch/powerpc/mm/slb.c           |  4
-rw-r--r--  arch/powerpc/mm/slb_low.S       |  6
-rw-r--r--  arch/powerpc/mm/stab.c          | 10
4 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 846a1894cf95..5bb433cbe41b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -456,7 +456,7 @@ void __init htab_initialize(void)
 
 	/* create bolted the linear mapping in the hash table */
 	for (i=0; i < lmb.memory.cnt; i++) {
-		base = lmb.memory.region[i].base + KERNELBASE;
+		base = (unsigned long)__va(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -498,8 +498,8 @@ void __init htab_initialize(void)
 	 * for either 4K or 16MB pages.
 	 */
 	if (tce_alloc_start) {
-		tce_alloc_start += KERNELBASE;
-		tce_alloc_end += KERNELBASE;
+		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+		tce_alloc_end = (unsigned long)__va(tce_alloc_end);
 
 		if (base + size >= tce_alloc_start)
 			tce_alloc_start = base + size + 1;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a47b273600ec..cc22570856af 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -75,7 +75,7 @@ static void slb_flush_and_rebolt(void)
 	vflags = SLB_VSID_KERNEL | virtual_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -213,7 +213,7 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte	%0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, lflags, 0);
+	create_slbe(PAGE_OFFSET, lflags, 0);
 
 	/* VMALLOC space has 4K pages always for now */
 	create_slbe(VMALLOCBASE, vflags, 1);
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 950ffc5848c7..d1acee38f163 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -37,9 +37,9 @@ _GLOBAL(slb_allocate_realmode)
 
 	srdi	r9,r3,60		/* get region */
 	srdi	r10,r3,28		/* get esid */
-	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */
+	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
-	/* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
+	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
 	blt	cr7,0f			/* user or kernel? */
 
 	/* kernel address: proto-VSID = ESID */
@@ -166,7 +166,7 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9)
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index a18dab0d6b12..82e4951826bc 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 		}
 
 		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 			break;
 
 		castout_entry = (castout_entry + 1) & 0xf;
@@ -251,7 +251,7 @@ void stabs_alloc(void)
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
 
-		newstab += KERNELBASE;
+		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
 
@@ -270,11 +270,11 @@ void stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 	unsigned long stabreal;
 
 	asm volatile("isync; slbia; isync":::"memory");
-	make_ste(stab, GET_ESID(KERNELBASE), vsid);
+	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 
 	/* Order update */
 	asm volatile("sync":::"memory");