author    Michael Ellerman <michael@ellerman.id.au>    2005-12-05 11:24:33 -0500
committer Paul Mackerras <paulus@samba.org>            2006-01-08 22:51:54 -0500
commit    b5666f70395016a55cc9d57826508b8a346398d0
tree      39d74718c2bc3f2fcba6456fdc39a6a0de3d78bd /arch/powerpc
parent    51fae6de24da57bc6cdaa1b253595c3513ecbf2d
[PATCH] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET
This patch separates usage of KERNELBASE and PAGE_OFFSET. I haven't looked
at any of the PPC32 code; if we ever want to support Kdump on PPC we'll have
to do another audit, ditto for iSeries.

This patch makes PAGE_OFFSET the constant: it'll always be 0xC * 1 gazillion
for 64-bit.

To get a physical address from a virtual one you subtract PAGE_OFFSET,
_not_ KERNELBASE.

KERNELBASE is the virtual address of the start of the kernel; it's often the
same as PAGE_OFFSET, but _might not be_.

If you want to know something's offset from the start of the kernel you
should subtract KERNELBASE.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
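[Editor's note] A minimal C sketch of the rule the commit message states may
help; it is not part of the patch. The EXAMPLE_* constants and example_*
helpers are hypothetical names chosen purely for illustration (the kernel
itself uses __va()/__pa() and the PAGE_OFFSET/KERNELBASE macros):

/* Illustrative sketch only, not part of this patch.
 * PAGE_OFFSET is the fixed base of the kernel's linear mapping;
 * KERNELBASE is where the kernel image starts. They are often equal,
 * but need not be (e.g. a kdump kernel loaded at a different address).
 */
#define EXAMPLE_PAGE_OFFSET 0xC000000000000000UL /* constant on 64-bit */
#define EXAMPLE_KERNELBASE  0xC000000000000000UL /* may differ from PAGE_OFFSET */

/* virtual -> physical: subtract PAGE_OFFSET, not KERNELBASE */
static unsigned long example_virt_to_phys(unsigned long vaddr)
{
	return vaddr - EXAMPLE_PAGE_OFFSET;
}

/* offset of an address from the start of the kernel image: subtract KERNELBASE */
static unsigned long example_offset_into_kernel(unsigned long vaddr)
{
	return vaddr - EXAMPLE_KERNELBASE;
}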
Diffstat (limited to 'arch/powerpc')
 -rw-r--r--  arch/powerpc/kernel/btext.c             |  4
 -rw-r--r--  arch/powerpc/kernel/entry_64.S          |  4
 -rw-r--r--  arch/powerpc/kernel/lparmap.c           |  6
 -rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c  |  5
 -rw-r--r--  arch/powerpc/mm/hash_utils_64.c         |  6
 -rw-r--r--  arch/powerpc/mm/slb.c                   |  4
 -rw-r--r--  arch/powerpc/mm/slb_low.S               |  6
 -rw-r--r--  arch/powerpc/mm/stab.c                  | 10
8 files changed, 22 insertions, 23 deletions
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 893dd24a9f67..5de0d80ca2f2 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -60,7 +60,7 @@ int force_printk_to_btext = 0;
  *
  * The display is mapped to virtual address 0xD0000000, rather
  * than 1:1, because some some CHRP machines put the frame buffer
- * in the region starting at 0xC0000000 (KERNELBASE).
+ * in the region starting at 0xC0000000 (PAGE_OFFSET).
  * This mapping is temporary and will disappear as soon as the
  * setup done by MMU_Init() is applied.
  *
@@ -71,7 +71,7 @@ int force_printk_to_btext = 0;
  */
 void __init btext_prepare_BAT(void)
 {
-	unsigned long vaddr = KERNELBASE + 0x10000000;
+	unsigned long vaddr = PAGE_OFFSET + 0x10000000;
 	unsigned long addr;
 	unsigned long lowbits;
 
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 7b9397169709..aacebb33e98a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -690,7 +690,7 @@ _GLOBAL(enter_rtas)
 
 	/* Setup our real return addr */
 	SET_REG_TO_LABEL(r4,.rtas_return_loc)
-	SET_REG_TO_CONST(r9,KERNELBASE)
+	SET_REG_TO_CONST(r9,PAGE_OFFSET)
 	sub	r4,r4,r9
 	mtlr	r4
 
@@ -718,7 +718,7 @@ _GLOBAL(enter_rtas)
 _STATIC(rtas_return_loc)
 	/* relocation is off at this point */
 	mfspr	r4,SPRN_SPRG3		/* Get PACA */
-	SET_REG_TO_CONST(r5, KERNELBASE)
+	SET_REG_TO_CONST(r5, PAGE_OFFSET)
 	sub	r4,r4,r5		/* RELOC the PACA base pointer */
 
 	mfmsr	r6
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index 5a05a797485f..8a53d436ad9a 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -16,8 +16,8 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
 	.xSegmentTableOffs = STAB0_PAGE,
 
 	.xEsids = {
-		{ .xKernelEsid = GET_ESID(KERNELBASE),
-		  .xKernelVsid = KERNEL_VSID(KERNELBASE), },
+		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
+		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
 		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
 		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
 	},
@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
 	.xRanges = {
 		{ .xPages = HvPagesToMap,
 		  .xOffset = 0,
-		  .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
+		  .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
 		},
 	},
 };
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index ec0f06bfc24a..0b0fa4768995 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -153,9 +153,8 @@ void kexec_copy_flush(struct kimage *image)
 	 * including ones that were in place on the original copy
 	 */
 	for (i = 0; i < nr_segments; i++)
-		flush_icache_range(ranges[i].mem + KERNELBASE,
-				   ranges[i].mem + KERNELBASE +
-				   ranges[i].memsz);
+		flush_icache_range((unsigned long)__va(ranges[i].mem),
+				   (unsigned long)__va(ranges[i].mem + ranges[i].memsz));
 }
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 846a1894cf95..5bb433cbe41b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -456,7 +456,7 @@ void __init htab_initialize(void)
 
 	/* create bolted the linear mapping in the hash table */
 	for (i=0; i < lmb.memory.cnt; i++) {
-		base = lmb.memory.region[i].base + KERNELBASE;
+		base = (unsigned long)__va(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -498,8 +498,8 @@ void __init htab_initialize(void)
 	 * for either 4K or 16MB pages.
 	 */
 	if (tce_alloc_start) {
-		tce_alloc_start += KERNELBASE;
-		tce_alloc_end += KERNELBASE;
+		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+		tce_alloc_end = (unsigned long)__va(tce_alloc_end);
 
 		if (base + size >= tce_alloc_start)
 			tce_alloc_start = base + size + 1;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a47b273600ec..cc22570856af 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -75,7 +75,7 @@ static void slb_flush_and_rebolt(void)
 	vflags = SLB_VSID_KERNEL | virtual_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -213,7 +213,7 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte	%0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, lflags, 0);
+	create_slbe(PAGE_OFFSET, lflags, 0);
 
 	/* VMALLOC space has 4K pages always for now */
 	create_slbe(VMALLOCBASE, vflags, 1);
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 950ffc5848c7..d1acee38f163 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -37,9 +37,9 @@ _GLOBAL(slb_allocate_realmode)
 
 	srdi	r9,r3,60		/* get region */
 	srdi	r10,r3,28		/* get esid */
-	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */
+	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
-	/* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
+	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
 	blt	cr7,0f			/* user or kernel? */
 
 	/* kernel address: proto-VSID = ESID */
@@ -166,7 +166,7 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9)
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index a18dab0d6b12..82e4951826bc 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	}
 
 	/* Dont cast out the first kernel segment */
-	if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+	if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 		break;
 
 	castout_entry = (castout_entry + 1) & 0xf;
@@ -251,7 +251,7 @@ void stabs_alloc(void)
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
 
-		newstab += KERNELBASE;
+		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
 
@@ -270,11 +270,11 @@ void stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 	unsigned long stabreal;
 
 	asm volatile("isync; slbia; isync":::"memory");
-	make_ste(stab, GET_ESID(KERNELBASE), vsid);
+	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 
 	/* Order update */
 	asm volatile("sync":::"memory");