Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/Makefile          |  2
-rw-r--r--  arch/powerpc/mm/fault.c           | 25
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  |  2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   |  2
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c     | 15
-rw-r--r--  arch/powerpc/mm/imalloc.c         |  6
-rw-r--r--  arch/powerpc/mm/init_64.c         |  8
-rw-r--r--  arch/powerpc/mm/numa.c            | 65
-rw-r--r--  arch/powerpc/mm/pgtable_32.c      | 33
-rw-r--r--  arch/powerpc/mm/pgtable_64.c      | 52
-rw-r--r--  arch/powerpc/mm/slb.c             | 13
11 files changed, 144 insertions(+), 79 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 93441e7a2921..38a81967ca07 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -8,7 +8,7 @@ endif
 
 obj-y				:= fault.o mem.o lmb.o
 obj-$(CONFIG_PPC32)		+= init_32.o pgtable_32.o mmu_context_32.o
-hash-$(CONFIG_PPC_MULTIPLATFORM)	:= hash_native_64.o
+hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
 obj-$(CONFIG_PPC64)		+= init_64.o pgtable_64.o mmu_context_64.o \
 				   hash_utils_64.o hash_low_64.o tlb_64.o \
 				   slb_low.o slb.o stab.o mmap.o imalloc.o \
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e8fa50624b70..03aeb3a46077 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -426,18 +426,21 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 
 	/* kernel has accessed a bad area */
 
-	printk(KERN_ALERT "Unable to handle kernel paging request for ");
 	switch (regs->trap) {
 	case 0x300:
 	case 0x380:
-		printk("data at address 0x%08lx\n", regs->dar);
-		break;
-	case 0x400:
-	case 0x480:
-		printk("instruction fetch\n");
-		break;
-	default:
-		printk("unknown fault\n");
+		printk(KERN_ALERT "Unable to handle kernel paging request for "
+			"data at address 0x%08lx\n", regs->dar);
+		break;
+	case 0x400:
+	case 0x480:
+		printk(KERN_ALERT "Unable to handle kernel paging request for "
+			"instruction fetch\n");
+		break;
+	default:
+		printk(KERN_ALERT "Unable to handle kernel paging request for "
+			"unknown fault\n");
+		break;
 	}
 	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
 		regs->nip);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index c90f124f3c71..6f1016acdbf6 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -123,7 +123,7 @@ static inline void native_unlock_hpte(hpte_t *hptep)
 	clear_bit(HPTE_LOCK_BIT, word);
 }
 
-long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 			unsigned long pa, unsigned long rflags,
 			unsigned long vflags, int psize)
 {
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1915661c2c81..c0d2a694fa30 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -277,7 +277,7 @@ static void __init htab_init_page_sizes(void)
 	 * Not in the device-tree, let's fallback on known size
 	 * list for 16M capable GP & GR
 	 */
-	if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
+	if (cpu_has_feature(CPU_FTR_16M_PAGE))
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 			sizeof(mmu_psize_defaults_gp));
 found:
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 506d89768d45..1bb20d841080 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -146,6 +146,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	return hugepte_offset(hpdp, addr);
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 {
 	pte_t *hugepte = hugepd_page(*hpdp);
@@ -739,7 +744,8 @@ static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
 	struct vm_area_struct *vma;
 
 	vma = find_vma(current->mm, addr);
-	if (!vma || ((addr + len) <= vma->vm_start))
+	if (TASK_SIZE - len >= addr &&
+	    (!vma || ((addr + len) <= vma->vm_start)))
 		return 0;
 
 	return -ENOMEM;
@@ -810,6 +816,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
+	if (len > TASK_SIZE)
+		return -ENOMEM;
 
 	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
 		return -EINVAL;
@@ -818,9 +826,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	BUG_ON((addr + len) < addr);
 
 	if (test_thread_flag(TIF_32BIT)) {
-		/* Paranoia, caller should have dealt with this */
-		BUG_ON((addr + len) > 0x100000000UL);
-
 		curareas = current->mm->context.low_htlb_areas;
 
 		/* First see if we can use the hint address */
@@ -1042,7 +1047,7 @@ repeat:
 	return err;
 }
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(addr, 0, kmem_cache_size(cache));
 }
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index add8c1a9af68..c831815c31f0 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -138,7 +138,7 @@ static struct vm_struct * split_im_region(unsigned long v_addr,
 	struct vm_struct *vm2 = NULL;
 	struct vm_struct *new_vm = NULL;
 
-	vm1 = (struct vm_struct *) kmalloc(sizeof(*vm1), GFP_KERNEL);
+	vm1 = kmalloc(sizeof(*vm1), GFP_KERNEL);
 	if (vm1 == NULL) {
 		printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
 		return NULL;
@@ -172,7 +172,7 @@ static struct vm_struct * split_im_region(unsigned long v_addr,
 	 * uppermost remainder, and use existing parent one for the
 	 * lower remainder of parent range
 	 */
-	vm2 = (struct vm_struct *) kmalloc(sizeof(*vm2), GFP_KERNEL);
+	vm2 = kmalloc(sizeof(*vm2), GFP_KERNEL);
 	if (vm2 == NULL) {
 		printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
 		kfree(vm1);
@@ -206,7 +206,7 @@ static struct vm_struct * __add_new_im_area(unsigned long req_addr,
 			break;
 	}
 
-	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
+	area = kmalloc(sizeof(*area), GFP_KERNEL);
 	if (!area)
 		return NULL;
 	area->flags = 0;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3ff374697e34..d12a87ec5ae9 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -130,7 +130,7 @@ static int __init setup_kcore(void)
 		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
 		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
 		if (!kcore_mem)
-			panic("mem_init: kmalloc failed\n");
+			panic("%s: kmalloc failed\n", __FUNCTION__);
 
 		kclist_add(kcore_mem, __va(base), size);
 	}
@@ -141,7 +141,7 @@ static int __init setup_kcore(void)
 }
 module_init(setup_kcore);
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(addr, 0, kmem_cache_size(cache));
 }
@@ -166,9 +166,9 @@ static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
 /* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
  * can't put into the tables above, because HPAGE_SHIFT is not compile
  * time constant. */
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
 #else
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
 #endif
 
 void pgtable_cache_init(void)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9da01dc8cfd9..262790910ff2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -295,6 +295,63 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 	return lmb_end_of_DRAM() - start;
 }
 
+/*
+ * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
+ * node.  This assumes n_mem_{addr,size}_cells have been set.
+ */
+static void __init parse_drconf_memory(struct device_node *memory)
+{
+	const unsigned int *lm, *dm, *aa;
+	unsigned int ls, ld, la;
+	unsigned int n, aam, aalen;
+	unsigned long lmb_size, size;
+	int nid, default_nid = 0;
+	unsigned int start, ai, flags;
+
+	lm = get_property(memory, "ibm,lmb-size", &ls);
+	dm = get_property(memory, "ibm,dynamic-memory", &ld);
+	aa = get_property(memory, "ibm,associativity-lookup-arrays", &la);
+	if (!lm || !dm || !aa ||
+	    ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
+	    la < 2 * sizeof(unsigned int))
+		return;
+
+	lmb_size = read_n_cells(n_mem_size_cells, &lm);
+	n = *dm++;		/* number of LMBs */
+	aam = *aa++;		/* number of associativity lists */
+	aalen = *aa++;		/* length of each associativity list */
+	if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
+	    la < (aam * aalen + 2) * sizeof(unsigned int))
+		return;
+
+	for (; n != 0; --n) {
+		start = read_n_cells(n_mem_addr_cells, &dm);
+		ai = dm[2];
+		flags = dm[3];
+		dm += 4;
+		/* 0x80 == reserved, 0x8 = assigned to us */
+		if ((flags & 0x80) || !(flags & 0x8))
+			continue;
+		nid = default_nid;
+		/* flags & 0x40 means associativity index is invalid */
+		if (min_common_depth > 0 && min_common_depth <= aalen &&
+		    (flags & 0x40) == 0 && ai < aam) {
+			/* this is like of_node_to_nid_single */
+			nid = aa[ai * aalen + min_common_depth - 1];
+			if (nid == 0xffff || nid >= MAX_NUMNODES)
+				nid = default_nid;
+		}
+		node_set_online(nid);
+
+		size = numa_enforce_memory_limit(start, lmb_size);
+		if (!size)
+			continue;
+
+		add_active_range(nid, start >> PAGE_SHIFT,
+				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+	}
+}
+
 static int __init parse_numa_properties(void)
 {
 	struct device_node *cpu = NULL;
@@ -385,6 +442,14 @@ new_range:
 		goto new_range;
 	}
 
+	/*
+	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+	 * property in the ibm,dynamic-reconfiguration-memory node.
+	 */
+	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+	if (memory)
+		parse_drconf_memory(memory);
+
 	return 0;
 }
 
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8fcacb0239da..1891dbeeb8e9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -141,29 +141,19 @@ void pte_free(struct page *ptepage)
 	__free_page(ptepage);
 }
 
-#ifndef CONFIG_PHYS_64BIT
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
 	return __ioremap(addr, size, _PAGE_NO_CACHE);
 }
-#else /* CONFIG_PHYS_64BIT */
-void __iomem *
-ioremap64(unsigned long long addr, unsigned long size)
-{
-	return __ioremap(addr, size, _PAGE_NO_CACHE);
-}
-EXPORT_SYMBOL(ioremap64);
+EXPORT_SYMBOL(ioremap);
 
 void __iomem *
-ioremap(phys_addr_t addr, unsigned long size)
+ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
-	phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
-
-	return ioremap64(addr64, size);
+	return __ioremap(addr, size, flags);
 }
-#endif /* CONFIG_PHYS_64BIT */
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
 
 void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
@@ -264,20 +254,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-void __iomem *ioport_map(unsigned long port, unsigned int len)
-{
-	return (void __iomem *) (port + _IO_BASE);
-}
-
-void ioport_unmap(void __iomem *addr)
-{
-	/* Nothing to do */
-}
-EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
-
-int
-map_page(unsigned long va, phys_addr_t pa, int flags)
+int map_page(unsigned long va, phys_addr_t pa, int flags)
 {
 	pmd_t *pd;
 	pte_t *pg;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac64f4aaa509..16e4ee1c2318 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -113,7 +113,7 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 }
 
 
-static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
+static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
 			    unsigned long ea, unsigned long size,
 			    unsigned long flags)
 {
@@ -129,22 +129,12 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
 
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
-	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
-void __iomem * __ioremap(unsigned long addr, unsigned long size,
+void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 			 unsigned long flags)
 {
 	unsigned long pa, ea;
 	void __iomem *ret;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return (void __iomem *)addr;
-
 	/*
 	 * Choose an address to map it to.
 	 * Once the imalloc system is running, we use it.
@@ -178,9 +168,28 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
 	return ret;
 }
 
+
+void __iomem * ioremap(phys_addr_t addr, unsigned long size)
+{
+	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
+			     unsigned long flags)
+{
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
 
-int __ioremap_explicit(unsigned long pa, unsigned long ea,
+int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
 		       unsigned long size, unsigned long flags)
 {
 	struct vm_struct *area;
@@ -235,13 +244,10 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
  *
  * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
-void iounmap(volatile void __iomem *token)
+void __iounmap(volatile void __iomem *token)
 {
 	void *addr;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return;
-
 	if (!mem_init_done)
 		return;
 
@@ -250,6 +256,14 @@ void iounmap(volatile void __iomem *token)
 	im_free(addr);
 }
 
+void iounmap(volatile void __iomem *token)
+{
+	if (ppc_md.iounmap)
+		ppc_md.iounmap(token);
+	else
+		__iounmap(token);
+}
+
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 {
 	struct vm_struct *area;
@@ -268,7 +282,7 @@ static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 	return 0;
 }
 
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long addr;
@@ -303,8 +317,10 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size)
 }
 
 EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
 
 void __iomem * reserve_phb_iospace(unsigned long size)
 {
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index d3733912adb4..224e960650a0 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -23,6 +23,7 @@
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
+#include <asm/firmware.h>
 #include <linux/compiler.h>
 
 #ifdef DEBUG
@@ -193,6 +194,7 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 void slb_initialize(void)
 {
 	unsigned long linear_llp, vmalloc_llp, io_llp;
+	unsigned long lflags, vflags;
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
@@ -225,11 +227,12 @@ void slb_initialize(void)
 #endif
 	}
 
+	get_paca()->stab_rr = SLB_NUM_BOLTED;
+
 	/* On iSeries the bolted entries have already been set up by
 	 * the hypervisor from the lparMap data in head.S */
-#ifndef CONFIG_PPC_ISERIES
- {
-	unsigned long lflags, vflags;
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		return;
 
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
@@ -247,8 +250,4 @@ void slb_initialize(void)
 	 * elsewhere, we'll call _switch() which will bolt in the new
 	 * one. */
 	asm volatile("isync":::"memory");
- }
-#endif /* CONFIG_PPC_ISERIES */
-
-	get_paca()->stab_rr = SLB_NUM_BOLTED;
 }