Diffstat (limited to 'arch')
 arch/ppc64/kernel/pci.c     |  2 +-
 arch/ppc64/mm/hash_native.c |  3 +--
 arch/ppc64/mm/hash_utils.c  | 11 +++++------
 arch/ppc64/mm/imalloc.c     |  5 +++--
 arch/ppc64/mm/init.c        |  1 +
 arch/ppc64/mm/stab.c        |  5 +++++
 6 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index be3cc387c1ec..d786d4b6af0b 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -438,7 +438,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
 	int i;
 
 	if (page_is_ram(offset >> PAGE_SHIFT))
-		return prot;
+		return __pgprot(prot);
 
 	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
 
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 144657e0c3d5..52b6b9305341 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -320,8 +320,7 @@ static void native_flush_hash_range(unsigned long context,
 
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if ((batch->addr[i] >= USER_START) &&
-		    (batch->addr[i] <= USER_END))
+		if (batch->addr[i] < KERNELBASE)
 			vsid = get_vsid(context, batch->addr[i]);
 		else
 			vsid = get_kernel_vsid(batch->addr[i]);
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index e48be12f518c..0a0f97008d02 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -298,24 +298,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	int local = 0;
 	cpumask_t tmp;
 
+	if ((ea & ~REGION_MASK) > EADDR_MASK)
+		return 1;
+
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
 		user_region = 1;
 		mm = current->mm;
-		if ((ea > USER_END) || (! mm))
+		if (! mm)
 			return 1;
 
 		vsid = get_vsid(mm->context.id, ea);
 		break;
 	case IO_REGION_ID:
-		if (ea > IMALLOC_END)
-			return 1;
 		mm = &ioremap_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
 	case VMALLOC_REGION_ID:
-		if (ea > VMALLOC_END)
-			return 1;
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
@@ -362,7 +361,7 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 	unsigned long vsid, vpn, va, hash, secondary, slot;
 	unsigned long huge = pte_huge(pte);
 
-	if ((ea >= USER_START) && (ea <= USER_END))
+	if (ea < KERNELBASE)
 		vsid = get_vsid(context, ea);
 	else
 		vsid = get_kernel_vsid(ea);
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index 9d92b0d9cde5..cb8727f3267a 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -14,6 +14,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/semaphore.h>
+#include <asm/imalloc.h>
 
 static DECLARE_MUTEX(imlist_sem);
 struct vm_struct * imlist = NULL;
@@ -23,11 +24,11 @@ static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
 	unsigned long addr;
 	struct vm_struct **p, *tmp;
 
-	addr = IMALLOC_START;
+	addr = ioremap_bot;
 	for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
 		if (size + addr < (unsigned long) tmp->addr)
 			break;
-		if ((unsigned long)tmp->addr >= IMALLOC_START)
+		if ((unsigned long)tmp->addr >= ioremap_bot)
 			addr = tmp->size + (unsigned long) tmp->addr;
 		if (addr > IMALLOC_END-size)
 			return 1;
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index cf33d7ec2e29..afbf25227cb0 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -64,6 +64,7 @@
 #include <asm/iommu.h>
 #include <asm/abs_addr.h>
 #include <asm/vdso.h>
+#include <asm/imalloc.h>
 
 int mem_init_done;
 unsigned long ioremap_bot = IMALLOC_BASE;
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index 31491131d5e4..df4bbe14153c 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -19,6 +19,11 @@
 #include <asm/paca.h>
 #include <asm/cputable.h>
 
+struct stab_entry {
+	unsigned long esid_data;
+	unsigned long vsid_data;
+};
+
 /* Both the segment table and SLB code uses the following cache */
 #define NR_STAB_CACHE_ENTRIES 8
 DEFINE_PER_CPU(long, stab_cache_ptr);