Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fault.c            7
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c    7
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c     42
-rw-r--r--  arch/powerpc/mm/imalloc.c          2
-rw-r--r--  arch/powerpc/mm/init_32.c          5
-rw-r--r--  arch/powerpc/mm/mem.c             13
-rw-r--r--  arch/powerpc/mm/numa.c           139
-rw-r--r--  arch/powerpc/mm/slb.c             16
-rw-r--r--  arch/powerpc/mm/slb_low.S          6
-rw-r--r--  arch/powerpc/mm/stab.c            16
-rw-r--r--  arch/powerpc/mm/tlb_64.c           2
11 files changed, 189 insertions, 66 deletions
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 93d4fbfdb724..a4815d316722 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -81,7 +81,8 @@ static int store_updates_sp(struct pt_regs *regs)
 }
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-static void do_dabr(struct pt_regs *regs, unsigned long error_code)
+static void do_dabr(struct pt_regs *regs, unsigned long address,
+		    unsigned long error_code)
 {
 	siginfo_t info;
 
@@ -99,7 +100,7 @@ static void do_dabr(struct pt_regs *regs, unsigned long error_code)
 	info.si_signo = SIGTRAP;
 	info.si_errno = 0;
 	info.si_code = TRAP_HWBKPT;
-	info.si_addr = (void __user *)regs->nip;
+	info.si_addr = (void __user *)address;
 	force_sig_info(SIGTRAP, &info, current);
 }
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
@@ -159,7 +160,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 	if (error_code & DSISR_DABRMATCH) {
 		/* DABR match */
-		do_dabr(regs, error_code);
+		do_dabr(regs, address, error_code);
 		return 0;
 	}
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a606504678bd..5bb433cbe41b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -456,7 +456,7 @@ void __init htab_initialize(void)
 
 	/* create bolted the linear mapping in the hash table */
 	for (i=0; i < lmb.memory.cnt; i++) {
-		base = lmb.memory.region[i].base + KERNELBASE;
+		base = (unsigned long)__va(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -498,8 +498,8 @@ void __init htab_initialize(void)
 	 * for either 4K or 16MB pages.
 	 */
 	if (tce_alloc_start) {
-		tce_alloc_start += KERNELBASE;
-		tce_alloc_end += KERNELBASE;
+		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+		tce_alloc_end = (unsigned long)__va(tce_alloc_end);
 
 		if (base + size >= tce_alloc_start)
 			tce_alloc_start = base + size + 1;
@@ -644,6 +644,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	DBG_LOW(" -> rc=%d\n", rc);
 	return rc;
 }
+EXPORT_SYMBOL_GPL(hash_page);
 
 void hash_preload(struct mm_struct *mm, unsigned long ea,
 		  unsigned long access, unsigned long trap)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 54131b877da3..b51bb28c054b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -549,6 +549,17 @@ fail:
 	return addr;
 }
 
+static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
+{
+	struct vm_area_struct *vma;
+
+	vma = find_vma(current->mm, addr);
+	if (!vma || ((addr + len) <= vma->vm_start))
+		return 0;
+
+	return -ENOMEM;
+}
+
 static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
 {
 	unsigned long addr = 0;
@@ -618,15 +629,28 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
 		return -EINVAL;
 
+	/* Paranoia, caller should have dealt with this */
+	BUG_ON((addr + len) < addr);
+
 	if (test_thread_flag(TIF_32BIT)) {
+		/* Paranoia, caller should have dealt with this */
+		BUG_ON((addr + len) > 0x100000000UL);
+
 		curareas = current->mm->context.low_htlb_areas;
 
-		/* First see if we can do the mapping in the existing
-		 * low areas */
+		/* First see if we can use the hint address */
+		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
+			areamask = LOW_ESID_MASK(addr, len);
+			if (open_low_hpage_areas(current->mm, areamask) == 0)
+				return addr;
+		}
+
+		/* Next see if we can map in the existing low areas */
 		addr = htlb_get_low_area(len, curareas);
 		if (addr != -ENOMEM)
 			return addr;
 
+		/* Finally go looking for areas to open */
 		lastshift = 0;
 		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
 		     ! lastshift; areamask >>=1) {
@@ -641,12 +665,22 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	} else {
 		curareas = current->mm->context.high_htlb_areas;
 
-		/* First see if we can do the mapping in the existing
-		 * high areas */
+		/* First see if we can use the hint address */
+		/* We discourage 64-bit processes from doing hugepage
+		 * mappings below 4GB (must use MAP_FIXED) */
+		if ((addr >= 0x100000000UL)
+		    && (htlb_check_hinted_area(addr, len) == 0)) {
+			areamask = HTLB_AREA_MASK(addr, len);
+			if (open_high_hpage_areas(current->mm, areamask) == 0)
+				return addr;
+		}
+
+		/* Next see if we can map in the existing high areas */
 		addr = htlb_get_high_area(len, curareas);
 		if (addr != -ENOMEM)
 			return addr;
 
+		/* Finally go looking for areas to open */
 		lastshift = 0;
 		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
 		     ! lastshift; areamask >>=1) {
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index f9587bcc6a48..8b0c132bc163 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -107,6 +107,7 @@ static int im_region_status(unsigned long v_addr, unsigned long size,
 		if (v_addr < (unsigned long) tmp->addr + tmp->size)
 			break;
 
+	*vm = NULL;
 	if (tmp) {
 		if (im_region_overlaps(v_addr, size, tmp))
 			return IM_REGION_OVERLAP;
@@ -127,7 +128,6 @@ static int im_region_status(unsigned long v_addr, unsigned long size,
 		}
 	}
 
-	*vm = NULL;
 	return IM_REGION_UNUSED;
 }
 
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 7d4b8b5f0606..7d0d75c11848 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -188,6 +188,11 @@ void __init MMU_init(void)
 
 	if (ppc_md.progress)
 		ppc_md.progress("MMU:exit", 0x211);
+
+	/* From now on, btext is no longer BAT mapped if it was at all */
+#ifdef CONFIG_BOOTX_TEXT
+	btext_unmap();
+#endif
 }
 
 /* This is only called until mem_init is done. */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index ed6ed2e30dac..15aac0d78dfa 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -114,19 +114,18 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-/*
- * This works only for the non-NUMA case. Later, we'll need a lookup
- * to convert from real physical addresses to nid, that doesn't use
- * pfn_to_nid().
- */
 int __devinit add_memory(u64 start, u64 size)
 {
-	struct pglist_data *pgdata = NODE_DATA(0);
+	struct pglist_data *pgdata;
 	struct zone *zone;
+	int nid;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	start += KERNELBASE;
+	nid = hot_add_scn_to_nid(start);
+	pgdata = NODE_DATA(nid);
+
+	start = __va(start);
 	create_section_mapping(start, start + size);
 
 	/* this should work for most non-highmem platforms */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index ba7a3055a9fc..2863a912bcd0 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(node_data);
 
 static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
 static int min_common_depth;
+static int n_mem_addr_cells, n_mem_size_cells;
 
 /*
  * We need somewhere to store start/end/node for each region until we have
@@ -254,32 +255,20 @@ static int __init find_min_common_depth(void)
 	return depth;
 }
 
-static int __init get_mem_addr_cells(void)
+static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
 {
 	struct device_node *memory = NULL;
-	int rc;
 
 	memory = of_find_node_by_type(memory, "memory");
 	if (!memory)
-		return 0; /* it won't matter */
+		panic("numa.c: No memory nodes found!");
 
-	rc = prom_n_addr_cells(memory);
-	return rc;
+	*n_addr_cells = prom_n_addr_cells(memory);
+	*n_size_cells = prom_n_size_cells(memory);
+	of_node_put(memory);
 }
 
-static int __init get_mem_size_cells(void)
-{
-	struct device_node *memory = NULL;
-	int rc;
-
-	memory = of_find_node_by_type(memory, "memory");
-	if (!memory)
-		return 0; /* it won't matter */
-	rc = prom_n_size_cells(memory);
-	return rc;
-}
-
-static unsigned long __init read_n_cells(int n, unsigned int **buf)
+static unsigned long __devinit read_n_cells(int n, unsigned int **buf)
 {
 	unsigned long result = 0;
 
@@ -386,7 +375,6 @@ static int __init parse_numa_properties(void)
 {
 	struct device_node *cpu = NULL;
 	struct device_node *memory = NULL;
-	int addr_cells, size_cells;
 	int max_domain;
 	unsigned long i;
 
@@ -425,8 +413,7 @@ static int __init parse_numa_properties(void)
 		}
 	}
 
-	addr_cells = get_mem_addr_cells();
-	size_cells = get_mem_size_cells();
+	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
 	memory = NULL;
 	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
 		unsigned long start;
@@ -436,15 +423,21 @@ static int __init parse_numa_properties(void)
 		unsigned int *memcell_buf;
 		unsigned int len;
 
-		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+		memcell_buf = (unsigned int *)get_property(memory,
+			"linux,usable-memory", &len);
+		if (!memcell_buf || len <= 0)
+			memcell_buf =
+				(unsigned int *)get_property(memory, "reg",
+					&len);
 		if (!memcell_buf || len <= 0)
 			continue;
 
-		ranges = memory->n_addrs;
+		/* ranges in cell */
+		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
 new_range:
 		/* these are order-sensitive, and modify the buffer pointer */
-		start = read_n_cells(addr_cells, &memcell_buf);
-		size = read_n_cells(size_cells, &memcell_buf);
+		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
+		size = read_n_cells(n_mem_size_cells, &memcell_buf);
 
 		numa_domain = of_node_numa_domain(memory);
 
@@ -497,7 +490,41 @@ static void __init setup_nonnuma(void)
 	node_set_online(0);
 }
 
-static void __init dump_numa_topology(void)
+void __init dump_numa_cpu_topology(void)
+{
+	unsigned int node;
+	unsigned int cpu, count;
+
+	if (min_common_depth == -1 || !numa_enabled)
+		return;
+
+	for_each_online_node(node) {
+		printk(KERN_INFO "Node %d CPUs:", node);
+
+		count = 0;
+		/*
+		 * If we used a CPU iterator here we would miss printing
+		 * the holes in the cpumap.
+		 */
+		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
+				if (count == 0)
+					printk(" %u", cpu);
+				++count;
+			} else {
+				if (count > 1)
+					printk("-%u", cpu - 1);
+				count = 0;
+			}
+		}
+
+		if (count > 1)
+			printk("-%u", NR_CPUS - 1);
+		printk("\n");
+	}
+}
+
+static void __init dump_numa_memory_topology(void)
 {
 	unsigned int node;
 	unsigned int count;
@@ -529,7 +556,6 @@ static void __init dump_numa_topology(void)
 		printk("-0x%lx", i);
 		printk("\n");
 	}
-	return;
 }
 
 /*
@@ -591,7 +617,7 @@ void __init do_init_bootmem(void)
 	if (parse_numa_properties())
 		setup_nonnuma();
 	else
-		dump_numa_topology();
+		dump_numa_memory_topology();
 
 	register_cpu_notifier(&ppc64_numa_nb);
 
@@ -730,3 +756,60 @@ static int __init early_numa(char *p)
 	return 0;
 }
 early_param("numa", early_numa);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Find the node associated with a hot added memory section.  Section
+ * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
+ * sections are fully contained within a single LMB.
+ */
+int hot_add_scn_to_nid(unsigned long scn_addr)
+{
+	struct device_node *memory = NULL;
+	nodemask_t nodes;
+	int numa_domain = 0;
+
+	if (!numa_enabled || (min_common_depth < 0))
+		return numa_domain;
+
+	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
+		unsigned long start, size;
+		int ranges;
+		unsigned int *memcell_buf;
+		unsigned int len;
+
+		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+		if (!memcell_buf || len <= 0)
+			continue;
+
+		/* ranges in cell */
+		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
+ha_new_range:
+		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
+		size = read_n_cells(n_mem_size_cells, &memcell_buf);
+		numa_domain = of_node_numa_domain(memory);
+
+		/* Domains not present at boot default to 0 */
+		if (!node_online(numa_domain))
+			numa_domain = any_online_node(NODE_MASK_ALL);
+
+		if ((scn_addr >= start) && (scn_addr < (start + size))) {
+			of_node_put(memory);
+			goto got_numa_domain;
+		}
+
+		if (--ranges)		/* process all ranges in cell */
+			goto ha_new_range;
+	}
+	BUG();	/* section address should be found above */
+
+	/* Temporary code to ensure that returned node is not empty */
+got_numa_domain:
+	nodes_setall(nodes);
+	while (NODE_DATA(numa_domain)->node_spanned_pages == 0) {
+		node_clear(numa_domain, nodes);
+		numa_domain = any_online_node(nodes);
+	}
+	return numa_domain;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 60e852f2f8e5..ffc8ed4de62d 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -75,7 +75,7 @@ static void slb_flush_and_rebolt(void)
 	vflags = SLB_VSID_KERNEL | virtual_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -87,8 +87,8 @@ static void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte %2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
-		        "r"(mk_esid_data(VMALLOCBASE, 1)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
+		        "r"(mk_esid_data(VMALLOC_START, 1)),
 		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
 		        "r"(ksp_esid_data)
 		     : "memory");
@@ -134,14 +134,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	else
 		unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-	if (pc >= KERNELBASE)
+	if (is_kernel_addr(pc))
 		return;
 	slb_allocate(pc);
 
 	if (GET_ESID(pc) == GET_ESID(stack))
 		return;
 
-	if (stack >= KERNELBASE)
+	if (is_kernel_addr(stack))
 		return;
 	slb_allocate(stack);
 
@@ -149,7 +149,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
 		return;
 
-	if (unmapped_base >= KERNELBASE)
+	if (is_kernel_addr(unmapped_base))
 		return;
 	slb_allocate(unmapped_base);
 }
@@ -213,10 +213,10 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, lflags, 0);
+	create_slbe(PAGE_OFFSET, lflags, 0);
 
 	/* VMALLOC space has 4K pages always for now */
-	create_slbe(VMALLOCBASE, vflags, 1);
+	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 950ffc5848c7..d1acee38f163 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -37,9 +37,9 @@ _GLOBAL(slb_allocate_realmode)
 
 	srdi	r9,r3,60		/* get region */
 	srdi	r10,r3,28		/* get esid */
-	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */
+	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
-	/* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
+	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
 	blt	cr7,0f			/* user or kernel? */
 
 	/* kernel address: proto-VSID = ESID */
@@ -166,7 +166,7 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9)
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 51e7951414e5..82e4951826bc 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 		}
 
 		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 			break;
 
 		castout_entry = (castout_entry + 1) & 0xf;
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
 	unsigned long offset;
 
 	/* Kernel or user address? */
-	if (ea >= KERNELBASE) {
+	if (is_kernel_addr(ea)) {
 		vsid = get_kernel_vsid(ea);
 	} else {
 		if ((ea >= TASK_SIZE_USER64) || (! mm))
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
 
 	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
 
-	if (ea < KERNELBASE) {
+	if (!is_kernel_addr(ea)) {
 		offset = __get_cpu_var(stab_cache_ptr);
 		if (offset < NR_STAB_CACHE_ENTRIES)
 			__get_cpu_var(stab_cache[offset++]) = stab_entry;
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 	     entry++, ste++) {
 		unsigned long ea;
 		ea = ste->esid_data & ESID_MASK;
-		if (ea < KERNELBASE) {
+		if (!is_kernel_addr(ea)) {
 			ste->esid_data = 0;
 		}
 	}
@@ -251,7 +251,7 @@ void stabs_alloc(void)
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
 
-		newstab += KERNELBASE;
+		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
 
@@ -270,11 +270,11 @@ void stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 	unsigned long stabreal;
 
 	asm volatile("isync; slbia; isync":::"memory");
-	make_ste(stab, GET_ESID(KERNELBASE), vsid);
+	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 
 	/* Order update */
 	asm volatile("sync":::"memory");
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 859d29a0cac5..bb3afb6e6317 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -168,7 +168,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 		batch->mm = mm;
 		batch->psize = psize;
 	}
-	if (addr < KERNELBASE) {
+	if (!is_kernel_addr(addr)) {
 		vsid = get_vsid(mm->context.id, addr);
 		WARN_ON(vsid == 0);
 	} else