author	David Gibson <david@gibson.dropbear.id.au>	2005-12-14 00:08:40 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-01-08 23:05:47 -0500
commit	14c89e7fc84ae55354b8bf12fee1b6d14f259c8a (patch)
tree	83d6bbd44499d81e927bbe743f1a212ff5d30b51 /arch
parent	56c8eaee65d688b526c12dca54a30276335679e5 (diff)
[PATCH] powerpc: Replace VMALLOCBASE with VMALLOC_START
On ppc64, we independently define VMALLOCBASE and VMALLOC_START to be the same
thing: the start of the vmalloc() area at 0xd000000000000000.  VMALLOC_START
is used much more widely, including in generic code, so this patch gets rid of
the extraneous VMALLOCBASE.

This does require moving the definitions of region IDs from page_64.h to
pgtable.h, but they don't clearly belong in the former rather than the latter,
anyway.  While we're moving them, clean up the definitions of the REGION_IDs:

 - Abolish REGION_SIZE; it was only used once, to define REGION_MASK anyway.

 - Define the specific region IDs in terms of the REGION_ID() macro.

 - Define KERNEL_REGION_ID in terms of PAGE_OFFSET rather than KERNELBASE.
   It amounts to the same thing, but conceptually this is about the region of
   the linear mapping (which starts at PAGE_OFFSET) rather than of the kernel
   text itself (which is at KERNELBASE).

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
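The include/ side of this patch (moving the region definitions from page_64.h
to pgtable.h) falls outside this diffstat, which is limited to arch/.  As a
rough sketch of the cleaned-up definitions described above: REGION_ID(),
REGION_MASK and KERNEL_REGION_ID are named in the message, while REGION_SHIFT,
VMALLOC_REGION_ID and USER_REGION_ID are assumed here rather than quoted from
the patch.

	/* Sketch reconstructed from the commit message, not copied from the
	 * patch.  On ppc64 the top four bits of an effective address select
	 * the region, so REGION_ID() is just a shift. */
	#define REGION_SHIFT		60UL	/* assumed */
	#define REGION_MASK		(0xfUL << REGION_SHIFT)
	#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

	#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))	/* 0xd */
	#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))	/* 0xc */
	#define USER_REGION_ID		(0UL)

With this layout REGION_MASK is defined directly, instead of via the old
single-use REGION_SIZE, and KERNEL_REGION_ID follows the linear mapping at
PAGE_OFFSET, as the message describes.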
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/lparmap.c  4
-rw-r--r--  arch/powerpc/mm/slb.c          6
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index 8a53d436ad9a..92d947447565 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -18,8 +18,8 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
 	.xEsids = {
 		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
 		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
-		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
-		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
+		{ .xKernelEsid = GET_ESID(VMALLOC_START),
+		  .xKernelVsid = KERNEL_VSID(VMALLOC_START), },
 	},
 
 	.xRanges = {
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cc22570856af..ffc8ed4de62d 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -87,8 +87,8 @@ static void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte	%2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
-			"r"(mk_esid_data(VMALLOCBASE, 1)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
+			"r"(mk_esid_data(VMALLOC_START, 1)),
 			"r"(mk_vsid_data(ksp_esid_data, lflags)),
 			"r"(ksp_esid_data)
 		     : "memory");
@@ -216,7 +216,7 @@ void slb_initialize(void)
 	create_slbe(PAGE_OFFSET, lflags, 0);
 
 	/* VMALLOC space has 4K pages always for now */
-	create_slbe(VMALLOCBASE, vflags, 1);
+	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
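create_slbe() is likewise outside this hunk; a hedged sketch, again patterned
on the slb.c of this period rather than quoted from it:

	/* Sketch, not part of this patch: write one bolted SLB entry.
	 * "entry" is the slot index (0 = linear mapping, 1 = vmalloc). */
	static inline void create_slbe(unsigned long ea, unsigned long flags,
				       unsigned long entry)
	{
		asm volatile("slbmte  %0,%1"
			     : /* no outputs */
			     : "r" (mk_vsid_data(ea, flags)),
			       "r" (mk_esid_data(ea, entry))
			     : "memory");
	}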