author	David Gibson <david@gibson.dropbear.id.au>	2005-05-05 19:15:13 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-05-05 19:36:32 -0400
commit	1f8d419e291f7f7f7f3ffd4f0ba00834621690c8 (patch)
tree	833df93032a38bc749458ce8be3a316eae1d5215 /arch/ppc64/mm
parent	e685752de107201432a055f7c45c396a5b04dc17 (diff)
[PATCH] ppc64: pgtable.h and other header cleanups
This patch started as simply removing a few never-used macros from asm-ppc64/pgtable.h, then kind of grew. It now makes a bunch of cleanups to the ppc64 low-level header files (with corresponding changes to .c files where necessary), such as:

- Abolishing never-used macros
- Eliminating multiple #defines with the same purpose
- Removing pointless macros (cases where just expanding the macro everywhere turns out clearer and more sensible)
- Defining macros in terms of each other in some cases where they previously weren't
- Moving imalloc() related definitions from pgtable.h to their own header file (imalloc.h)
- Re-arranging headers to group things more logically
- Moving all VSID allocation related things to mmu.h, instead of being split between mmu.h and mmu_context.h
- Removing some reserved space for flags from the PMD, since we're not using it
- Fixing some bugs which broke the compile with STRICT_MM_TYPECHECKS

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
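The STRICT_MM_TYPECHECKS fixes deserve a word of context. A minimal sketch of the pattern follows, simplified from the kernel's asm/page.h convention; the snippet is illustrative only and not part of this patch. With the option on, page-table values become one-member structs, so accidentally mixing a pte_t with a plain long fails to compile instead of silently working:

	/*
	 * Sketch only: how STRICT_MM_TYPECHECKS turns page-table values
	 * into distinct struct types. Simplified from the kernel's
	 * asm/page.h pattern of the era.
	 */
	#ifdef STRICT_MM_TYPECHECKS
	typedef struct { unsigned long pte; } pte_t;	/* no implicit conversion */
	#define pte_val(x)	((x).pte)
	#define __pte(x)	((pte_t) { (x) })
	#else
	typedef unsigned long pte_t;			/* plain integer */
	#define pte_val(x)	(x)
	#define __pte(x)	(x)
	#endif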
Diffstat (limited to 'arch/ppc64/mm')
-rw-r--r--	arch/ppc64/mm/hash_native.c	3
-rw-r--r--	arch/ppc64/mm/hash_utils.c	11
-rw-r--r--	arch/ppc64/mm/imalloc.c	5
-rw-r--r--	arch/ppc64/mm/init.c	1
-rw-r--r--	arch/ppc64/mm/stab.c	5
5 files changed, 15 insertions(+), 10 deletions(-)
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 144657e0c3d5..52b6b9305341 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -320,8 +320,7 @@ static void native_flush_hash_range(unsigned long context,
 
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if ((batch->addr[i] >= USER_START) &&
-		    (batch->addr[i] <= USER_END))
+		if (batch->addr[i] < KERNELBASE)
 			vsid = get_vsid(context, batch->addr[i]);
 		else
 			vsid = get_kernel_vsid(batch->addr[i]);
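The two-sided USER_START/USER_END test collapses to a single comparison because, in the ppc64 layout this code assumes, every user effective address lies below the kernel's base address. A standalone sketch of the predicate (the constant is an assumption for illustration, not taken from the patch):

	/*
	 * Sketch of the simplified predicate. KERNELBASE here is an
	 * assumed value standing in for the kernel's real constant.
	 */
	#include <stdbool.h>

	#define KERNELBASE	0xC000000000000000UL	/* assumed kernel region base */

	static bool is_user_addr(unsigned long ea)
	{
		/* replaces: (ea >= USER_START) && (ea <= USER_END) */
		return ea < KERNELBASE;
	}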
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index e48be12f518c..0a0f97008d02 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -298,24 +298,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	int local = 0;
 	cpumask_t tmp;
 
+	if ((ea & ~REGION_MASK) > EADDR_MASK)
+		return 1;
+
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
 		user_region = 1;
 		mm = current->mm;
-		if ((ea > USER_END) || (! mm))
+		if (! mm)
 			return 1;
 
 		vsid = get_vsid(mm->context.id, ea);
 		break;
 	case IO_REGION_ID:
-		if (ea > IMALLOC_END)
-			return 1;
 		mm = &ioremap_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
 	case VMALLOC_REGION_ID:
-		if (ea > VMALLOC_END)
-			return 1;
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
@@ -362,7 +361,7 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 	unsigned long vsid, vpn, va, hash, secondary, slot;
 	unsigned long huge = pte_huge(pte);
 
-	if ((ea >= USER_START) && (ea <= USER_END))
+	if (ea < KERNELBASE)
 		vsid = get_vsid(context, ea);
 	else
 		vsid = get_kernel_vsid(ea);
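The first hash_utils.c hunk above hoists a single bounds check, (ea & ~REGION_MASK) > EADDR_MASK, in front of the switch; that is what lets the per-region IMALLOC_END/VMALLOC_END/USER_END tests go away. A sketch of the idea (the shift width and mask values are assumptions for illustration, not the kernel's exact definitions):

	/*
	 * Illustrative sketch of the hoisted bounds check. ppc64 of this
	 * era used the top nibble of an effective address as a region ID;
	 * the remaining bits are an offset that must fall within the
	 * implemented address range, whichever region it is in.
	 */
	#define REGION_SHIFT	60
	#define REGION_MASK	(0xfUL << REGION_SHIFT)		/* top nibble = region ID */
	#define REGION_ID(ea)	((unsigned long)(ea) >> REGION_SHIFT)
	#define EADDR_MASK	((1UL << 41) - 1)		/* assumed implemented EA bits */

	static int ea_out_of_range(unsigned long ea)
	{
		/* one check replaces a separate upper-bound test per region */
		return (ea & ~REGION_MASK) > EADDR_MASK;
	}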
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index 9d92b0d9cde5..cb8727f3267a 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -14,6 +14,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/semaphore.h>
+#include <asm/imalloc.h>
 
 static DECLARE_MUTEX(imlist_sem);
 struct vm_struct * imlist = NULL;
@@ -23,11 +24,11 @@ static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
 	unsigned long addr;
 	struct vm_struct **p, *tmp;
 
-	addr = IMALLOC_START;
+	addr = ioremap_bot;
 	for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
 		if (size + addr < (unsigned long) tmp->addr)
 			break;
-		if ((unsigned long)tmp->addr >= IMALLOC_START)
+		if ((unsigned long)tmp->addr >= ioremap_bot)
 			addr = tmp->size + (unsigned long) tmp->addr;
 		if (addr > IMALLOC_END-size)
 			return 1;
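get_free_im_addr() is a first-fit scan over a list kept sorted by address; the change only moves the search's starting point from the IMALLOC_START macro to the ioremap_bot variable it expanded to. A self-contained sketch of the same loop shape (the types and names here are invented for illustration, not kernel code):

	/*
	 * Minimal first-fit sketch: walk regions sorted by address and
	 * slot the request into the first gap large enough, starting
	 * the search at `bottom`.
	 */
	struct region {
		unsigned long addr;
		unsigned long size;
		struct region *next;
	};

	/* returns 0 and sets *out on success, 1 if the area is exhausted */
	static int first_fit(struct region *head, unsigned long bottom,
			     unsigned long top, unsigned long size,
			     unsigned long *out)
	{
		unsigned long addr = bottom;
		struct region *r;

		for (r = head; r; r = r->next) {
			if (addr + size < r->addr)	/* gap before this region fits */
				break;
			if (r->addr >= bottom)		/* otherwise skip past it */
				addr = r->addr + r->size;
			if (addr > top - size)
				return 1;
		}
		*out = addr;
		return 0;
	}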
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index cf33d7ec2e29..afbf25227cb0 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -64,6 +64,7 @@
 #include <asm/iommu.h>
 #include <asm/abs_addr.h>
 #include <asm/vdso.h>
+#include <asm/imalloc.h>
 
 int mem_init_done;
 unsigned long ioremap_bot = IMALLOC_BASE;
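ioremap_bot starts at IMALLOC_BASE and is bumped upward by mappings made before mem_init_done, which appears to be why the imalloc.c hunk above can safely begin its search at ioremap_bot. A toy sketch of that bump allocation (the base constant and names are assumptions, not taken from the patch):

	/*
	 * Sketch only: a boot-time watermark allocator. Each early claim
	 * advances the watermark, and the list-based allocator later
	 * starts searching from wherever the watermark ended up.
	 */
	#define IMALLOC_BASE_SKETCH	0xE000000000000000UL	/* assumed region base */

	static unsigned long ioremap_bot_sketch = IMALLOC_BASE_SKETCH;

	static unsigned long early_ioremap_claim(unsigned long size)
	{
		unsigned long addr = ioremap_bot_sketch;

		ioremap_bot_sketch += size;	/* bump the watermark */
		return addr;
	}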
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index 31491131d5e4..df4bbe14153c 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -19,6 +19,11 @@
 #include <asm/paca.h>
 #include <asm/cputable.h>
 
+struct stab_entry {
+	unsigned long esid_data;
+	unsigned long vsid_data;
+};
+
 /* Both the segment table and SLB code uses the following cache */
 #define NR_STAB_CACHE_ENTRIES 8
 DEFINE_PER_CPU(long, stab_cache_ptr);
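struct stab_entry moves out of the shared headers and into stab.c, the only file that manipulates raw segment-table entries. For orientation, a sketch of how such an entry gets filled in; the bit positions are assumptions loosely based on the POWER segment-table format, not taken from this patch:

	/*
	 * Sketch only: a segment table entry is two doublewords, one
	 * carrying the ESID plus a valid bit, the other the VSID.
	 */
	struct stab_entry_sketch {
		unsigned long esid_data;	/* ESID + valid bit */
		unsigned long vsid_data;	/* VSID + protection bits */
	};

	#define STE_ESID_SHIFT	28	/* assumed ESID position */
	#define STE_VALID	0x80UL	/* assumed valid-bit position */

	static void ste_set(struct stab_entry_sketch *ste,
			    unsigned long esid, unsigned long vsid)
	{
		ste->vsid_data = vsid;
		/* write the ESID word last so the entry is never visible
		 * in a half-initialized state */
		ste->esid_data = (esid << STE_ESID_SHIFT) | STE_VALID;
	}

Keeping the definition private to stab.c also means future layout changes touch exactly one file.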