author		Ingo Molnar <mingo@elte.hu>	2010-08-31 03:45:21 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-08-31 03:45:46 -0400
commit		daab7fc734a53fdeaf844b7c03053118ad1769da
tree		575deb3cdcc6dda562acaed6f7c29bc81ae01cf2
parent		774ea0bcb27f57b6fd521b3b6c43237782fed4b9
parent		2bfc96a127bc1cc94d26bfaa40159966064f9c8c
Merge commit 'v2.6.36-rc3' into x86/memblock
Conflicts:
	arch/x86/kernel/trampoline.c
	mm/memblock.c

Merge reason: Resolve the conflicts, update to latest upstream.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/fsl_booke_mmu.c		7
-rw-r--r--	arch/powerpc/mm/highmem.c		4
-rw-r--r--	arch/powerpc/mm/init_64.c		2
-rw-r--r--	arch/powerpc/mm/numa.c			122
-rw-r--r--	arch/powerpc/mm/pgtable.c		1
-rw-r--r--	arch/powerpc/mm/tlb_hash32.c		15
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c		129
-rw-r--r--	arch/powerpc/mm/tlb_nohash_low.S	1
8 files changed, 201 insertions, 80 deletions
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 0be8fe24c54e..cde270847e7c 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -105,9 +105,10 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
 }
 
 /*
- * Set up one of the I/D BAT (block address translation) register pairs.
- * The parameters are not checked; in particular size must be a power
- * of 4 between 4k and 256M.
+ * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
+ * in particular size must be a power of 4 between 4k and 256M (or 1G, for cpus
+ * that support extended page sizes). Note that while some cpus support a
+ * page size of 4G, we don't allow its use here.
  */
 static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
 		unsigned long size, unsigned long flags, unsigned int pid)
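
The rewritten comment drops the stale BAT reference and states the real constraint: TLBCAM sizes step in powers of 4, because each increment of the hardware size field quadruples the mapped range. A minimal stand-alone sketch of that validity check follows; the helper name and the extended-page-size flag are illustrative, not part of the patch:

#include <stdbool.h>

/* Illustrative only: size must be a power of 4 between 4K and 256M,
 * or up to 1G on cpus with extended page sizes. */
static bool tlbcam_size_valid(unsigned long size, bool extended)
{
	unsigned long max = extended ? 0x40000000UL : 0x10000000UL;

	if (size < 0x1000 || size > max)
		return false;
	if (size & (size - 1))		/* must be a power of 2 */
		return false;
	/* a power of 4 is a power of 2 with an even exponent */
	return (__builtin_ctzl(size) & 1) == 0;
}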
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index c2186c74c85a..857d4173f9c6 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -52,7 +52,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void kunmap_atomic(void *kvaddr, enum km_type type)
+void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 {
 #ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
@@ -74,4 +74,4 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_notypecheck);
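
The rename comes in via the 2.6.36 merge: kunmap_atomic() became a generic type-checking wrapper, so each architecture now provides kunmap_atomic_notypecheck() as the raw implementation. The mainline wrapper at the time looked roughly like this (paraphrased from include/linux/highmem.h; check the tree for the exact form):

/* Reject a struct page * argument at compile time, then call the
 * arch-provided implementation. */
#define kunmap_atomic(addr, idx) do { \
		BUILD_BUG_ON(__same_type((addr), struct page *)); \
		kunmap_atomic_notypecheck((addr), (idx)); \
	} while (0)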
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9e081ffbf0f2..6374b2196a17 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -79,7 +79,9 @@
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 phys_addr_t memstart_addr = ~0;
+EXPORT_SYMBOL_GPL(memstart_addr);
 phys_addr_t kernstart_addr;
+EXPORT_SYMBOL_GPL(kernstart_addr);
 
 void free_initmem(void)
 {
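
With the two EXPORT_SYMBOL_GPL lines, GPL-compatible modules can now reference these addresses. A hypothetical consumer, purely for illustration (this module is not part of the merge):

#include <linux/kernel.h>
#include <linux/module.h>

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

static int __init addr_peek_init(void)
{
	pr_info("memstart=0x%llx kernstart=0x%llx\n",
		(unsigned long long)memstart_addr,
		(unsigned long long)kernstart_addr);
	return 0;
}
module_init(addr_peek_init);

static void __exit addr_peek_exit(void)
{
}
module_exit(addr_peek_exit);

MODULE_LICENSE("GPL");	/* required: the symbols are EXPORT_SYMBOL_GPL */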
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9ba9ba1a430d..066fb443ba5a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -42,6 +42,12 @@ EXPORT_SYMBOL(node_data);
 
 static int min_common_depth;
 static int n_mem_addr_cells, n_mem_size_cells;
+static int form1_affinity;
+
+#define MAX_DISTANCE_REF_POINTS 4
+static int distance_ref_points_depth;
+static const unsigned int *distance_ref_points;
+static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
 
 /*
  * Allocate node_to_cpumask_map based on number of available nodes
@@ -204,6 +210,39 @@ static const u32 *of_get_usable_memory(struct device_node *memory)
 	return prop;
 }
 
+int __node_distance(int a, int b)
+{
+	int i;
+	int distance = LOCAL_DISTANCE;
+
+	if (!form1_affinity)
+		return distance;
+
+	for (i = 0; i < distance_ref_points_depth; i++) {
+		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
+			break;
+
+		/* Double the distance for each NUMA level */
+		distance *= 2;
+	}
+
+	return distance;
+}
+
+static void initialize_distance_lookup_table(int nid,
+		const unsigned int *associativity)
+{
+	int i;
+
+	if (!form1_affinity)
+		return;
+
+	for (i = 0; i < distance_ref_points_depth; i++) {
+		distance_lookup_table[nid][i] =
+			associativity[distance_ref_points[i]];
+	}
+}
+
 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
  * info is found.
  */
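
The new __node_distance() starts at LOCAL_DISTANCE (10 in mainline) and doubles once for each associativity level, from most significant down, at which the two nodes still differ. A self-contained user-space sketch with made-up lookup-table values:

#include <stdio.h>

#define LOCAL_DISTANCE	10
#define DEPTH		3

/* Fabricated associativity values: nodes 0 and 1 differ at the two
 * most significant levels and match at the third. */
static const int lookup[2][DEPTH] = {
	{ 2, 5, 7 },	/* node 0 */
	{ 1, 4, 7 },	/* node 1 */
};

int main(void)
{
	int distance = LOCAL_DISTANCE;

	for (int i = 0; i < DEPTH; i++) {
		if (lookup[0][i] == lookup[1][i])
			break;
		distance *= 2;	/* one doubling per differing NUMA level */
	}
	printf("node_distance(0, 1) = %d\n", distance);	/* prints 40 */
	return 0;
}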
@@ -225,6 +264,10 @@ static int of_node_to_nid_single(struct device_node *device)
 		/* POWER4 LPAR uses 0xffff as invalid node */
 		if (nid == 0xffff || nid >= MAX_NUMNODES)
 			nid = -1;
+
+	if (nid > 0 && tmp[0] >= distance_ref_points_depth)
+		initialize_distance_lookup_table(nid, tmp);
+
 out:
 	return nid;
 }
@@ -251,26 +294,10 @@ int of_node_to_nid(struct device_node *device)
 }
 EXPORT_SYMBOL_GPL(of_node_to_nid);
 
-/*
- * In theory, the "ibm,associativity" property may contain multiple
- * associativity lists because a resource may be multiply connected
- * into the machine. This resource then has different associativity
- * characteristics relative to its multiple connections. We ignore
- * this for now. We also assume that all cpu and memory sets have
- * their distances represented at a common level. This won't be
- * true for hierarchical NUMA.
- *
- * In any case the ibm,associativity-reference-points should give
- * the correct depth for a normal NUMA system.
- *
- * - Dave Hansen <haveblue@us.ibm.com>
- */
 static int __init find_min_common_depth(void)
 {
-	int depth, index;
-	const unsigned int *ref_points;
+	int depth;
 	struct device_node *rtas_root;
-	unsigned int len;
 	struct device_node *chosen;
 	const char *vec5;
 
@@ -280,18 +307,28 @@ static int __init find_min_common_depth(void)
 		return -1;
 
 	/*
-	 * this property is 2 32-bit integers, each representing a level of
-	 * depth in the associativity nodes. The first is for an SMP
-	 * configuration (should be all 0's) and the second is for a normal
-	 * NUMA configuration.
+	 * This property is a set of 32-bit integers, each representing
+	 * an index into the ibm,associativity nodes.
+	 *
+	 * With form 0 affinity the first integer is for an SMP configuration
+	 * (should be all 0's) and the second is for a normal NUMA
+	 * configuration. We have only one level of NUMA.
+	 *
+	 * With form 1 affinity the first integer is the most significant
+	 * NUMA boundary and the following are progressively less significant
+	 * boundaries. There can be more than one level of NUMA.
 	 */
-	index = 1;
-	ref_points = of_get_property(rtas_root,
-			"ibm,associativity-reference-points", &len);
+	distance_ref_points = of_get_property(rtas_root,
+					"ibm,associativity-reference-points",
+					&distance_ref_points_depth);
+
+	if (!distance_ref_points) {
+		dbg("NUMA: ibm,associativity-reference-points not found.\n");
+		goto err;
+	}
+
+	distance_ref_points_depth /= sizeof(int);
 
-	/*
-	 * For form 1 affinity information we want the first field
-	 */
 #define VEC5_AFFINITY_BYTE 5
 #define VEC5_AFFINITY 0x80
 	chosen = of_find_node_by_path("/chosen");
@@ -299,19 +336,38 @@ static int __init find_min_common_depth(void)
 		vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
 		if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
 			dbg("Using form 1 affinity\n");
-			index = 0;
+			form1_affinity = 1;
 		}
 	}
 
-	if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
-		depth = ref_points[index];
+	if (form1_affinity) {
+		depth = distance_ref_points[0];
 	} else {
-		dbg("NUMA: ibm,associativity-reference-points not found.\n");
-		depth = -1;
+		if (distance_ref_points_depth < 2) {
+			printk(KERN_WARNING "NUMA: "
+				"short ibm,associativity-reference-points\n");
+			goto err;
+		}
+
+		depth = distance_ref_points[1];
+	}
+
+	/*
+	 * Warn and cap if the hardware supports more than
+	 * MAX_DISTANCE_REF_POINTS domains.
+	 */
+	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
+		printk(KERN_WARNING "NUMA: distance array capped at "
+			"%d entries\n", MAX_DISTANCE_REF_POINTS);
+		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
 	}
-	of_node_put(rtas_root);
 
+	of_node_put(rtas_root);
 	return depth;
+
+err:
+	of_node_put(rtas_root);
+	return -1;
 }
 
 static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
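
Note that of_get_property() hands back the property length in bytes, which is why the code above divides distance_ref_points_depth by sizeof(int) before treating it as a cell count. Boiled down, the depth selection amounts to the following sketch (a hypothetical helper, not the literal kernel flow):

/* refs/len_bytes as returned by of_get_property(); returns the
 * associativity depth to use, or -1 on error. */
static int pick_depth(const unsigned int *refs, int len_bytes, int form1)
{
	int ncells = len_bytes / sizeof(int);

	if (!refs)
		return -1;
	if (form1)
		return refs[0];		/* most significant NUMA boundary */
	if (ncells < 2)
		return -1;		/* form 0 needs at least two cells */
	return refs[1];			/* the "normal NUMA configuration" cell */
}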
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index ebc2f38eb381..2c7e801ab20b 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -92,7 +92,6 @@ static void pte_free_rcu_callback(struct rcu_head *head)
 
 static void pte_free_submit(struct pte_freelist_batch *batch)
 {
-	INIT_RCU_HEAD(&batch->rcu);
 	call_rcu(&batch->rcu, pte_free_rcu_callback);
 }
 
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 8aaa8b7eb324..690566b66e8e 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -89,17 +89,6 @@ void tlb_flush(struct mmu_gather *tlb)
 	 *  -- Cort
 	 */
 
-/*
- * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
- * the cache operations on the bus. Hence we need to use an IPI
- * to get the other CPU(s) to invalidate their TLBs.
- */
-#ifdef CONFIG_SMP_750
-#define FINISH_FLUSH smp_send_tlb_invalidate(0)
-#else
-#define FINISH_FLUSH do { } while (0)
-#endif
-
 static void flush_range(struct mm_struct *mm, unsigned long start,
 			unsigned long end)
 {
@@ -138,7 +127,6 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	flush_range(&init_mm, start, end);
-	FINISH_FLUSH;
 }
 EXPORT_SYMBOL(flush_tlb_kernel_range);
 
@@ -162,7 +150,6 @@ void flush_tlb_mm(struct mm_struct *mm)
 	 */
 	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
 		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
-	FINISH_FLUSH;
 }
 EXPORT_SYMBOL(flush_tlb_mm);
 
@@ -179,7 +166,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
 	if (!pmd_none(*pmd))
 		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
-	FINISH_FLUSH;
 }
 EXPORT_SYMBOL(flush_tlb_page);
 
@@ -192,6 +178,5 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		      unsigned long end)
 {
 	flush_range(vma->vm_mm, start, end);
-	FINISH_FLUSH;
 }
 EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index a086ed562606..6a0f20c25469 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -46,6 +46,7 @@
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	[MMU_PAGE_4K] = {
 		.shift	= 12,
+		.ind	= 20,
 		.enc	= BOOK3E_PAGESZ_4K,
 	},
 	[MMU_PAGE_16K] = {
@@ -54,6 +55,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	},
 	[MMU_PAGE_64K] = {
 		.shift	= 16,
+		.ind	= 28,
 		.enc	= BOOK3E_PAGESZ_64K,
 	},
 	[MMU_PAGE_1M] = {
@@ -62,6 +64,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	},
 	[MMU_PAGE_16M] = {
 		.shift	= 24,
+		.ind	= 36,
 		.enc	= BOOK3E_PAGESZ_16M,
 	},
 	[MMU_PAGE_256M] = {
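
The new .ind values give, as a power-of-two shift, the size of an indirect TLB entry for each direct page size. My reading, and it is an assumption the patch does not spell out, is that an indirect entry spanning 2^ind bytes maps 2^(ind - shift) pages, so with 8-byte PTEs the backing page table occupies 2^(ind - shift + 3) bytes. A back-of-envelope check:

#include <stdio.h>

int main(void)
{
	/* .shift/.ind pairs from the hunks above; 8-byte PTEs assumed. */
	const struct { int shift, ind; } sz[] = {
		{ 12, 20 },	/* 4K pages, 1M indirect entries */
		{ 16, 28 },	/* 64K pages, 256M indirect entries */
		{ 24, 36 },	/* 16M pages, 64G indirect entries */
	};

	for (int i = 0; i < 3; i++) {
		unsigned long ptes = 1UL << (sz[i].ind - sz[i].shift);

		printf("shift=%d ind=%d -> %lu PTEs, %lu-byte table\n",
		       sz[i].shift, sz[i].ind, ptes, ptes * 8);
	}
	return 0;
}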
@@ -344,16 +347,108 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
 	}
 }
 
-/*
- * Early initialization of the MMU TLB code
- */
-static void __early_init_mmu(int boot_cpu)
+static void setup_page_sizes(void)
+{
+	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+	unsigned int tlb0ps = mfspr(SPRN_TLB0PS);
+	unsigned int eptcfg = mfspr(SPRN_EPTCFG);
+	int i, psize;
+
+	/* Look for supported direct sizes */
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+		struct mmu_psize_def *def = &mmu_psize_defs[psize];
+
+		if (tlb0ps & (1U << (def->shift - 10)))
+			def->flags |= MMU_PAGE_SIZE_DIRECT;
+	}
+
+	/* Indirect page sizes supported ? */
+	if ((tlb0cfg & TLBnCFG_IND) == 0)
+		goto no_indirect;
+
+	/* Now, we only deal with one IND page size for each
+	 * direct size. Hopefully all implementations today are
+	 * unambiguous, but we might want to be careful in the
+	 * future.
+	 */
+	for (i = 0; i < 3; i++) {
+		unsigned int ps, sps;
+
+		sps = eptcfg & 0x1f;
+		eptcfg >>= 5;
+		ps = eptcfg & 0x1f;
+		eptcfg >>= 5;
+		if (!ps || !sps)
+			continue;
+		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+			struct mmu_psize_def *def = &mmu_psize_defs[psize];
+
+			if (ps == (def->shift - 10))
+				def->flags |= MMU_PAGE_SIZE_INDIRECT;
+			if (sps == (def->shift - 10))
+				def->ind = ps + 10;
+		}
+	}
+ no_indirect:
+
+	/* Cleanup array and print summary */
+	pr_info("MMU: Supported page sizes\n");
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+		struct mmu_psize_def *def = &mmu_psize_defs[psize];
+		const char *__page_type_names[] = {
+			"unsupported",
+			"direct",
+			"indirect",
+			"direct & indirect"
+		};
+		if (def->flags == 0) {
+			def->shift = 0;
+			continue;
+		}
+		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
+			__page_type_names[def->flags & 0x3]);
+	}
+}
+
+static void setup_mmu_htw(void)
 {
 	extern unsigned int interrupt_base_book3e;
 	extern unsigned int exc_data_tlb_miss_htw_book3e;
 	extern unsigned int exc_instruction_tlb_miss_htw_book3e;
 
 	unsigned int *ibase = &interrupt_base_book3e;
+
+	/* Check if HW tablewalk is present, and if yes, enable it by:
+	 *
+	 * - patching the TLB miss handlers to branch to the
+	 *   one dedicates to it
+	 *
+	 * - setting the global book3e_htw_enabled
+	 */
+	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+
+	if ((tlb0cfg & TLBnCFG_IND) &&
+	    (tlb0cfg & TLBnCFG_PT)) {
+		/* Our exceptions vectors start with a NOP and -then- a branch
+		 * to deal with single stepping from userspace which stops on
+		 * the second instruction. Thus we need to patch the second
+		 * instruction of the exception, not the first one
+		 */
+		patch_branch(ibase + (0x1c0 / 4) + 1,
+			     (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
+		patch_branch(ibase + (0x1e0 / 4) + 1,
+			     (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
+		book3e_htw_enabled = 1;
+	}
+	pr_info("MMU: Book3E Page Tables %s\n",
+		book3e_htw_enabled ? "Enabled" : "Disabled");
+}
+
+/*
+ * Early initialization of the MMU TLB code
+ */
+static void __early_init_mmu(int boot_cpu)
+{
 	unsigned int mas4;
 
 	/* XXX This will have to be decided at runtime, but right
@@ -370,35 +465,17 @@ static void __early_init_mmu(int boot_cpu)
 	 */
 	mmu_vmemmap_psize = MMU_PAGE_16M;
 
-	/* Check if HW tablewalk is present, and if yes, enable it by:
-	 *
-	 * - patching the TLB miss handlers to branch to the
-	 *   one dedicates to it
-	 *
-	 * - setting the global book3e_htw_enabled
-	 *
-	 * - Set MAS4:INDD and default page size
-	 */
-
 	/* XXX This code only checks for TLB 0 capabilities and doesn't
 	 *     check what page size combos are supported by the HW. It
 	 *     also doesn't handle the case where a separate array holds
 	 *     the IND entries from the array loaded by the PT.
 	 */
 	if (boot_cpu) {
-		unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+		/* Look for supported page sizes */
+		setup_page_sizes();
 
-		/* Check if HW loader is supported */
-		if ((tlb0cfg & TLBnCFG_IND) &&
-		    (tlb0cfg & TLBnCFG_PT)) {
-			patch_branch(ibase + (0x1c0 / 4),
-				     (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
-			patch_branch(ibase + (0x1e0 / 4),
-				     (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
-			book3e_htw_enabled = 1;
-		}
-		pr_info("MMU: Book3E Page Tables %s\n",
-			book3e_htw_enabled ? "Enabled" : "Disabled");
+		/* Look for HW tablewalk support */
+		setup_mmu_htw();
 	}
 
 	/* Set MAS4 based on page table setting */
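
Two details worth noting in the refactor above. setup_page_sizes() reads the supported direct sizes straight out of TLB0PS, where a set bit at position (shift - 10) advertises the 2^shift page size. And setup_mmu_htw() now patches the second instruction of each TLB miss vector (the new + 1), since per the comment the vectors open with a NOP so that single stepping from userspace works. A stand-alone sketch of the TLB0PS decode, with a fabricated register value:

#include <stdio.h>

int main(void)
{
	unsigned int tlb0ps = 0x00004444;	/* fabricated example value */
	static const int shifts[] = { 12, 16, 20, 24, 28 };

	for (unsigned int i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++)
		if (tlb0ps & (1u << (shifts[i] - 10)))
			printf("%lu KB direct pages supported\n",
			       1ul << (shifts[i] - 10));	/* 2^shift bytes in KB */
	return 0;
}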
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index cfa768203d08..b9d9fed8f36e 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -200,6 +200,7 @@ _GLOBAL(_tlbivax_bcast)
 	rlwimi	r5,r4,0,16,31
 	wrteei	0
 	mtspr	SPRN_MMUCR,r5
+	isync
 /* tlbivax 0,r3 - use .long to avoid binutils deps */
 	.long 0x7c000624 | (r3 << 11)
 	isync
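
The added isync matters because mtspr is not a context-synchronizing instruction on these cores: without it, the tlbivax could execute before the new MMUCR value (which selects the search PID) is in effect. A C-level sketch of the same ordering idea, assuming a 44x-class core where MMUCR is SPR 946:

/* Sketch only: make the MMUCR update architecturally visible before
 * any subsequent TLB operation that depends on it. */
static inline void mmucr_write_sync(unsigned long val)
{
	asm volatile("mtspr 946,%0" : : "r" (val));
	asm volatile("isync" : : : "memory");
}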