about | summary | refs | log | tree | commit | diff | stats
path: root/arch/powerpc/kernel
diff options
context:
space:
mode:
authorMichael Ellerman <michael@ellerman.id.au>2005-12-05 11:24:33 -0500
committerPaul Mackerras <paulus@samba.org>2006-01-08 22:51:54 -0500
commitb5666f70395016a55cc9d57826508b8a346398d0 (patch)
tree39d74718c2bc3f2fcba6456fdc39a6a0de3d78bd /arch/powerpc/kernel
parent51fae6de24da57bc6cdaa1b253595c3513ecbf2d (diff)
[PATCH] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET
This patch separates usage of KERNELBASE and PAGE_OFFSET. I haven't looked at any of the PPC32 code, if we ever want to support Kdump on PPC we'll have to do another audit, ditto for iSeries. This patch makes PAGE_OFFSET the constant, it'll always be 0xC * 1 gazillion for 64-bit. To get a physical address from a virtual one you subtract PAGE_OFFSET, _not_ KERNELBASE. KERNELBASE is the virtual address of the start of the kernel, it's often the same as PAGE_OFFSET, but _might not be_. If you want to know something's offset from the start of the kernel you should subtract KERNELBASE. Signed-off-by: Michael Ellerman <michael@ellerman.id.au> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--arch/powerpc/kernel/btext.c4
-rw-r--r--arch/powerpc/kernel/entry_64.S4
-rw-r--r--arch/powerpc/kernel/lparmap.c6
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c5
4 files changed, 9 insertions, 10 deletions
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 893dd24a9f67..5de0d80ca2f2 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -60,7 +60,7 @@ int force_printk_to_btext = 0;
60 * 60 *
61 * The display is mapped to virtual address 0xD0000000, rather 61 * The display is mapped to virtual address 0xD0000000, rather
62 * than 1:1, because some CHRP machines put the frame buffer 62 * than 1:1, because some CHRP machines put the frame buffer
63 * in the region starting at 0xC0000000 (KERNELBASE). 63 * in the region starting at 0xC0000000 (PAGE_OFFSET).
64 * This mapping is temporary and will disappear as soon as the 64 * This mapping is temporary and will disappear as soon as the
65 * setup done by MMU_Init() is applied. 65 * setup done by MMU_Init() is applied.
66 * 66 *
@@ -71,7 +71,7 @@ int force_printk_to_btext = 0;
71 */ 71 */
72void __init btext_prepare_BAT(void) 72void __init btext_prepare_BAT(void)
73{ 73{
74 unsigned long vaddr = KERNELBASE + 0x10000000; 74 unsigned long vaddr = PAGE_OFFSET + 0x10000000;
75 unsigned long addr; 75 unsigned long addr;
76 unsigned long lowbits; 76 unsigned long lowbits;
77 77
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 7b9397169709..aacebb33e98a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -690,7 +690,7 @@ _GLOBAL(enter_rtas)
690 690
691 /* Setup our real return addr */ 691 /* Setup our real return addr */
692 SET_REG_TO_LABEL(r4,.rtas_return_loc) 692 SET_REG_TO_LABEL(r4,.rtas_return_loc)
693 SET_REG_TO_CONST(r9,KERNELBASE) 693 SET_REG_TO_CONST(r9,PAGE_OFFSET)
694 sub r4,r4,r9 694 sub r4,r4,r9
695 mtlr r4 695 mtlr r4
696 696
@@ -718,7 +718,7 @@ _GLOBAL(enter_rtas)
718_STATIC(rtas_return_loc) 718_STATIC(rtas_return_loc)
719 /* relocation is off at this point */ 719 /* relocation is off at this point */
720 mfspr r4,SPRN_SPRG3 /* Get PACA */ 720 mfspr r4,SPRN_SPRG3 /* Get PACA */
721 SET_REG_TO_CONST(r5, KERNELBASE) 721 SET_REG_TO_CONST(r5, PAGE_OFFSET)
722 sub r4,r4,r5 /* RELOC the PACA base pointer */ 722 sub r4,r4,r5 /* RELOC the PACA base pointer */
723 723
724 mfmsr r6 724 mfmsr r6
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index 5a05a797485f..8a53d436ad9a 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -16,8 +16,8 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
16 .xSegmentTableOffs = STAB0_PAGE, 16 .xSegmentTableOffs = STAB0_PAGE,
17 17
18 .xEsids = { 18 .xEsids = {
19 { .xKernelEsid = GET_ESID(KERNELBASE), 19 { .xKernelEsid = GET_ESID(PAGE_OFFSET),
20 .xKernelVsid = KERNEL_VSID(KERNELBASE), }, 20 .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
21 { .xKernelEsid = GET_ESID(VMALLOCBASE), 21 { .xKernelEsid = GET_ESID(VMALLOCBASE),
22 .xKernelVsid = KERNEL_VSID(VMALLOCBASE), }, 22 .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
23 }, 23 },
@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
25 .xRanges = { 25 .xRanges = {
26 { .xPages = HvPagesToMap, 26 { .xPages = HvPagesToMap,
27 .xOffset = 0, 27 .xOffset = 0,
28 .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT), 28 .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
29 }, 29 },
30 }, 30 },
31}; 31};
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index ec0f06bfc24a..0b0fa4768995 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -153,9 +153,8 @@ void kexec_copy_flush(struct kimage *image)
153 * including ones that were in place on the original copy 153 * including ones that were in place on the original copy
154 */ 154 */
155 for (i = 0; i < nr_segments; i++) 155 for (i = 0; i < nr_segments; i++)
156 flush_icache_range(ranges[i].mem + KERNELBASE, 156 flush_icache_range((unsigned long)__va(ranges[i].mem),
157 ranges[i].mem + KERNELBASE + 157 (unsigned long)__va(ranges[i].mem + ranges[i].memsz));
158 ranges[i].memsz);
159} 158}
160 159
161#ifdef CONFIG_SMP 160#ifdef CONFIG_SMP