path: root/arch/powerpc/mm
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c     5
-rw-r--r--  arch/powerpc/mm/fault.c             181
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c      19
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c      20
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c         9
-rw-r--r--  arch/powerpc/mm/icswx.c              23
-rw-r--r--  arch/powerpc/mm/icswx.h               6
-rw-r--r--  arch/powerpc/mm/mem.c                 4
-rw-r--r--  arch/powerpc/mm/pgtable_32.c          2
-rw-r--r--  arch/powerpc/mm/slb.c                 6
-rw-r--r--  arch/powerpc/mm/slb_low.S            16
-rw-r--r--  arch/powerpc/mm/stab.c                9
12 files changed, 196 insertions, 104 deletions
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 329be36c0a8..6747eece84a 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -365,12 +365,11 @@ static inline void __dma_sync_page_highmem(struct page *page,
 	local_irq_save(flags);
 
 	do {
-		start = (unsigned long)kmap_atomic(page + seg_nr,
-				KM_PPC_SYNC_PAGE) + seg_offset;
+		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
 
 		/* Sync this buffer segment */
 		__dma_sync((void *)start, seg_size, direction);
-		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
+		kunmap_atomic((void *)start);
 		seg_nr++;
 
 		/* Calculate next buffer segment size */
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 2f0d1b032a8..19f2f9498b2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -105,6 +105,82 @@ static int store_updates_sp(struct pt_regs *regs)
 	}
 	return 0;
 }
+/*
+ * do_page_fault error handling helpers
+ */
+
+#define MM_FAULT_RETURN		0
+#define MM_FAULT_CONTINUE	-1
+#define MM_FAULT_ERR(sig)	(sig)
+
+static int out_of_memory(struct pt_regs *regs)
+{
+	/*
+	 * We ran out of memory, or some other thing happened to us that made
+	 * us unable to handle the page fault gracefully.
+	 */
+	up_read(&current->mm->mmap_sem);
+	if (!user_mode(regs))
+		return MM_FAULT_ERR(SIGKILL);
+	pagefault_out_of_memory();
+	return MM_FAULT_RETURN;
+}
+
+static int do_sigbus(struct pt_regs *regs, unsigned long address)
+{
+	siginfo_t info;
+
+	up_read(&current->mm->mmap_sem);
+
+	if (user_mode(regs)) {
+		info.si_signo = SIGBUS;
+		info.si_errno = 0;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)address;
+		force_sig_info(SIGBUS, &info, current);
+		return MM_FAULT_RETURN;
+	}
+	return MM_FAULT_ERR(SIGBUS);
+}
+
+static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
+{
+	/*
+	 * Pagefault was interrupted by SIGKILL. We have no reason to
+	 * continue the pagefault.
+	 */
+	if (fatal_signal_pending(current)) {
+		/*
+		 * If we have retry set, the mmap semaphore will have
+		 * alrady been released in __lock_page_or_retry(). Else
+		 * we release it now.
+		 */
+		if (!(fault & VM_FAULT_RETRY))
+			up_read(&current->mm->mmap_sem);
+		/* Coming from kernel, we need to deal with uaccess fixups */
+		if (user_mode(regs))
+			return MM_FAULT_RETURN;
+		return MM_FAULT_ERR(SIGKILL);
+	}
+
+	/* No fault: be happy */
+	if (!(fault & VM_FAULT_ERROR))
+		return MM_FAULT_CONTINUE;
+
+	/* Out of memory */
+	if (fault & VM_FAULT_OOM)
+		return out_of_memory(regs);
+
+	/* Bus error. x86 handles HWPOISON here, we'll add this if/when
+	 * we support the feature in HW
+	 */
+	if (fault & VM_FAULT_SIGBUS)
+		return do_sigbus(regs, addr);
+
+	/* We don't understand the fault code, this is fatal */
+	BUG();
+	return MM_FAULT_CONTINUE;
+}
 
 /*
  * For 600- and 800-family processors, the error_code parameter is DSISR
@@ -124,11 +200,12 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 {
 	struct vm_area_struct * vma;
 	struct mm_struct *mm = current->mm;
-	siginfo_t info;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	int code = SEGV_MAPERR;
-	int is_write = 0, ret;
+	int is_write = 0;
 	int trap = TRAP(regs);
 	int is_exec = trap == 0x400;
+	int fault;
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 	/*
@@ -145,6 +222,9 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	is_write = error_code & ESR_DST;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */
 
+	if (is_write)
+		flags |= FAULT_FLAG_WRITE;
+
 #ifdef CONFIG_PPC_ICSWX
 	/*
 	 * we need to do this early because this "data storage
@@ -152,13 +232,11 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * look at it
 	 */
 	if (error_code & ICSWX_DSI_UCT) {
-		int ret;
-
-		ret = acop_handle_fault(regs, address, error_code);
-		if (ret)
-			return ret;
+		int rc = acop_handle_fault(regs, address, error_code);
+		if (rc)
+			return rc;
 	}
-#endif
+#endif /* CONFIG_PPC_ICSWX */
 
 	if (notify_page_fault(regs))
 		return 0;
@@ -179,6 +257,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 #endif
 
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
+
 	if (in_atomic() || mm == NULL) {
 		if (!user_mode(regs))
 			return SIGSEGV;
@@ -212,7 +294,15 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		if (!user_mode(regs) && !search_exception_tables(regs->nip))
 			goto bad_area_nosemaphore;
 
+retry:
 		down_read(&mm->mmap_sem);
+	} else {
+		/*
+		 * The above down_read_trylock() might have succeeded in
+		 * which case we'll have missed the might_sleep() from
+		 * down_read():
+		 */
+		might_sleep();
 	}
 
 	vma = find_vma(mm, address);
@@ -327,30 +417,43 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(ret & VM_FAULT_ERROR)) {
-		if (ret & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (ret & VM_FAULT_SIGBUS)
-			goto do_sigbus;
-		BUG();
+	fault = handle_mm_fault(mm, vma, address, flags);
+	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+		int rc = mm_fault_error(regs, address, fault);
+		if (rc >= MM_FAULT_RETURN)
+			return rc;
 	}
-	if (ret & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-			      regs, address);
+
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+				      regs, address);
 #ifdef CONFIG_PPC_SMLPAR
 			if (firmware_has_feature(FW_FEATURE_CMO)) {
 				preempt_disable();
 				get_lppaca()->page_ins += (1 << PAGE_FACTOR);
 				preempt_enable();
+			}
+#endif /* CONFIG_PPC_SMLPAR */
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+				      regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
 		}
-#endif
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-			      regs, address);
 	}
 
 	up_read(&mm->mmap_sem);
 	return 0;
 
@@ -371,28 +474,6 @@ bad_area_nosemaphore:
 
 	return SIGSEGV;
 
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (!user_mode(regs))
-		return SIGKILL;
-	pagefault_out_of_memory();
-	return 0;
-
-do_sigbus:
-	up_read(&mm->mmap_sem);
-	if (user_mode(regs)) {
-		info.si_signo = SIGBUS;
-		info.si_errno = 0;
-		info.si_code = BUS_ADRERR;
-		info.si_addr = (void __user *)address;
-		force_sig_info(SIGBUS, &info, current);
-		return 0;
-	}
-	return SIGBUS;
 }
 
 /*
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 66a6fd38e9c..07ba45b0f07 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -149,12 +149,19 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
 unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
 			  phys_addr_t phys)
 {
-	unsigned int camsize = __ilog2(ram) & ~1U;
-	unsigned int align = __ffs(virt | phys) & ~1U;
-	unsigned long max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xf;
+	unsigned int camsize = __ilog2(ram);
+	unsigned int align = __ffs(virt | phys);
+	unsigned long max_cam;
 
-	/* Convert (4^max) kB to (2^max) bytes */
-	max_cam = max_cam * 2 + 10;
+	if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
+		/* Convert (4^max) kB to (2^max) bytes */
+		max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
+		camsize &= ~1U;
+		align &= ~1U;
+	} else {
+		/* Convert (2^max) kB to (2^max) bytes */
+		max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
+	}
 
 	if (camsize > align)
 		camsize = align;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2d282186cb4..3e8c37a4e39 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -55,6 +55,8 @@
 #include <asm/spu.h>
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
+#include <asm/fadump.h>
+#include <asm/firmware.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -625,6 +627,16 @@ static void __init htab_initialize(void)
 		/* Using a hypervisor which owns the htab */
 		htab_address = NULL;
 		_SDR1 = 0;
+#ifdef CONFIG_FA_DUMP
+		/*
+		 * If firmware assisted dump is active firmware preserves
+		 * the contents of htab along with entire partition memory.
+		 * Clear the htab if firmware assisted dump is active so
+		 * that we dont end up using old mappings.
+		 */
+		if (is_fadump_active() && ppc_md.hpte_clear_all)
+			ppc_md.hpte_clear_all();
+#endif
 	} else {
 		/* Find storage for the HPT. Must be contiguous in
 		 * the absolute address space. On cell we want it to be
@@ -745,12 +757,9 @@ void __init early_init_mmu(void)
 	 */
 	htab_initialize();
 
-	/* Initialize stab / SLB management except on iSeries
-	 */
+	/* Initialize stab / SLB management */
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
-	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		stab_initialize(get_paca()->stab_real);
 }
 
 #ifdef CONFIG_SMP
@@ -761,8 +770,7 @@ void __cpuinit early_init_mmu_secondary(void)
 		mtspr(SPRN_SDR1, _SDR1);
 
 	/* Initialize STAB/SLB. We use a virtual address as it works
-	 * in real mode on pSeries and we want a virtual address on
-	 * iSeries anyway
+	 * in real mode on pSeries.
 	 */
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a8b3cc7d90f..fb05b123218 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/hugetlb.h>
+#include <linux/export.h>
 #include <linux/of_fdt.h>
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
@@ -103,6 +104,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
 	*shift = hugepd_shift(*hpdp);
 	return hugepte_offset(hpdp, ea, pdshift);
 }
+EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
@@ -310,7 +312,8 @@ void __init reserve_hugetlb_gpages(void)
 	int i;
 
 	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
-	parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);
+	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
+			&do_gpage_early_setup);
 
 	/*
 	 * Walk gpage list in reverse, allocating larger page sizes first.
@@ -910,9 +913,9 @@ void flush_dcache_icache_hugepage(struct page *page)
 		if (!PageHighMem(page)) {
 			__flush_dcache_icache(page_address(page+i));
 		} else {
-			start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
+			start = kmap_atomic(page+i);
 			__flush_dcache_icache(start);
-			kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+			kunmap_atomic(start);
 		}
 	}
 }
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
index 5d9a59eaad9..8cdbd8634a5 100644
--- a/arch/powerpc/mm/icswx.c
+++ b/arch/powerpc/mm/icswx.c
@@ -163,7 +163,7 @@ EXPORT_SYMBOL_GPL(drop_cop);
 
 static int acop_use_cop(int ct)
 {
-	/* todo */
+	/* There is no alternate policy, yet */
 	return -1;
 }
 
@@ -227,11 +227,30 @@ int acop_handle_fault(struct pt_regs *regs, unsigned long address,
 		ct = (ccw >> 16) & 0x3f;
 	}
 
+	/*
+	 * We could be here because another thread has enabled acop
+	 * but the ACOP register has yet to be updated.
+	 *
+	 * This should have been taken care of by the IPI to sync all
+	 * the threads (see smp_call_function(sync_cop, mm, 1)), but
+	 * that could take forever if there are a significant amount
+	 * of threads.
+	 *
+	 * Given the number of threads on some of these systems,
+	 * perhaps this is the best way to sync ACOP rather than whack
+	 * every thread with an IPI.
+	 */
+	if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) {
+		sync_cop(current->active_mm);
+		return 0;
+	}
+
+	/* check for alternate policy */
 	if (!acop_use_cop(ct))
 		return 0;
 
 	/* at this point the CT is unknown to the system */
-	pr_warn("%s[%d]: Coprocessor %d is unavailable",
+	pr_warn("%s[%d]: Coprocessor %d is unavailable\n",
 		current->comm, current->pid, ct);
 
 	/* get inst if we don't already have it */
diff --git a/arch/powerpc/mm/icswx.h b/arch/powerpc/mm/icswx.h
index 42176bd0884..6dedc08e62c 100644
--- a/arch/powerpc/mm/icswx.h
+++ b/arch/powerpc/mm/icswx.h
@@ -59,4 +59,10 @@ extern void free_cop_pid(int free_pid);
 
 extern int acop_handle_fault(struct pt_regs *regs, unsigned long address,
 			     unsigned long error_code);
+
+static inline u64 acop_copro_type_bit(unsigned int type)
+{
+	return 1ULL << (63 - type);
+}
+
 #endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d974b79a306..baaafde7d13 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -458,9 +458,9 @@ void flush_dcache_icache_page(struct page *page)
 #endif
 #ifdef CONFIG_BOOKE
 	{
-		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+		void *start = kmap_atomic(page);
 		__flush_dcache_icache(start);
-		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+		kunmap_atomic(start);
 	}
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
 	/* On 8xx there is no need to kmap since highmem is not supported */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 51f87956f8f..0907f92ce30 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -207,7 +207,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	 */
 	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
 	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
-		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
+		printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
 		       (unsigned long long)p, __builtin_return_address(0));
 		return NULL;
 	}
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index e22276cb67a..a538c80db2d 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -21,7 +21,6 @@
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
-#include <asm/firmware.h>
 #include <linux/compiler.h>
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
@@ -307,11 +306,6 @@ void slb_initialize(void)
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
 
-	/* On iSeries the bolted entries have already been set up by
-	 * the hypervisor from the lparMap data in head.S */
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return;
-
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index ef653dc95b6..b9ee79ce220 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -217,21 +217,6 @@ slb_finish_load:
 	 * free slot first but that took too long. Unfortunately we
 	 * dont have any LRU information to help us choose a slot.
 	 */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-	/*
-	 * On iSeries, the "bolted" stack segment can be cast out on
-	 * shared processor switch so we need to check for a miss on
-	 * it and restore it to the right slot.
-	 */
-	ld	r9,PACAKSAVE(r13)
-	clrrdi	r9,r9,28
-	clrrdi	r3,r3,28
-	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
-	cmpld	r9,r3
-	beq	3f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
 
 7:	ld	r10,PACASTABRR(r13)
 	addi	r10,r10,1
@@ -282,7 +267,6 @@ _GLOBAL(slb_compare_rr_to_size)
 
 /*
  * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
- * We assume legacy iSeries will never have 1T segments.
  *
  * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
  */
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 41e31642a86..9106ebb118f 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -21,8 +21,6 @@
 #include <asm/cputable.h>
 #include <asm/prom.h>
 #include <asm/abs_addr.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_call.h>
 
 struct stab_entry {
 	unsigned long esid_data;
@@ -285,12 +283,5 @@ void stab_initialize(unsigned long stab)
 	/* Set ASR */
 	stabreal = get_paca()->stab_real | 0x1ul;
 
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		HvCall1(HvCallBaseSetASR, stabreal);
-		return;
-	}
-#endif /* CONFIG_PPC_ISERIES */
-
 	mtspr(SPRN_ASR, stabreal);
 }