path: root/arch/powerpc/mm/tlb_nohash.c
author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-07-23 19:15:47 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-08-19 20:25:09 -0400
commit		25d21ad6e799cccd097b9df2a2fefe19a7e1dfcf (patch)
tree		cd381527a069fed6cffa8755cac177639cc48b0b /arch/powerpc/mm/tlb_nohash.c
parent		a8f7758c1c52a13e031266483efd5525157e43e9 (diff)
powerpc: Add TLB management code for 64-bit Book3E

This adds the TLB miss handler assembly, the low level TLB flush routines
along with the necessary hook for dealing with our virtual page tables or
indirect TLB entries that need to be flushed when PTE pages are freed.

There is currently no support for hugetlbfs.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
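The softwalk case is the subtle part: without hardware-loaded page tables, the TLB miss handlers fetch PTEs through a virtual linear page table, so freeing a page of PTEs must also invalidate the TLB entry that maps that page in the virtual table. The sketch below models the address computation the patch performs in tlb_flush_pgtable() (full listing in the last hunk), assuming 4K pages and 8-byte PTEs; the region constants and shifts are copied from the patch, while vpte_flush_address() and the example address are hypothetical names used only for illustration.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4K pages, i.e. the !CONFIG_PPC_64K_PAGES case */

/*
 * Hypothetical model of the softwalk branch of tlb_flush_pgtable():
 * turn an effective address covered by a freed PTE page into the
 * address of that PTE page inside the virtual linear page table.
 * The region mask, region id and shift are taken from the patch.
 */
static uint64_t vpte_flush_address(uint64_t address)
{
	uint64_t rmask = 0xf000000000000000ull;	/* top nibble: region id */
	uint64_t rid   = (address & rmask) | 0x1000000000000000ull;
	uint64_t vpte  = address & ~rmask;

	/* ea / page size * 8 bytes per PTE = byte offset in the linear
	 * table; then truncate to the start of the containing PTE page */
	vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
	return vpte | rid;	/* relocate into the VPTE region */
}

int main(void)
{
	/* made-up user address whose PTE page was just freed */
	uint64_t ea = 0x0000000012345000ull;

	printf("ea   = 0x%016llx\n", (unsigned long long)ea);
	printf("vpte = 0x%016llx\n",
	       (unsigned long long)vpte_flush_address(ea));
	return 0;
}

Dividing the effective address by the page size and scaling by the 8-byte PTE size gives the byte offset of the PTE in the linear table; masking the low 12 bits selects the page of PTEs, which a single TLB invalidation can then cover.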
Diffstat (limited to 'arch/powerpc/mm/tlb_nohash.c')
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	| 203
1 file changed, 199 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 6b43fc49f10..d16100c9416 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -7,8 +7,8 @@
  *
  * -- BenH
  *
- * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
  *                     IBM Corp.
  *
  * Derived from arch/ppc/mm/init.c:
  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -34,12 +34,70 @@
 #include <linux/pagemap.h>
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
+#include <linux/lmb.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/code-patching.h>
 
 #include "mmu_decl.h"
 
+#ifdef CONFIG_PPC_BOOK3E
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+	[MMU_PAGE_4K] = {
+		.shift	= 12,
+		.enc	= BOOK3E_PAGESZ_4K,
+	},
+	[MMU_PAGE_16K] = {
+		.shift	= 14,
+		.enc	= BOOK3E_PAGESZ_16K,
+	},
+	[MMU_PAGE_64K] = {
+		.shift	= 16,
+		.enc	= BOOK3E_PAGESZ_64K,
+	},
+	[MMU_PAGE_1M] = {
+		.shift	= 20,
+		.enc	= BOOK3E_PAGESZ_1M,
+	},
+	[MMU_PAGE_16M] = {
+		.shift	= 24,
+		.enc	= BOOK3E_PAGESZ_16M,
+	},
+	[MMU_PAGE_256M] = {
+		.shift	= 28,
+		.enc	= BOOK3E_PAGESZ_256M,
+	},
+	[MMU_PAGE_1G] = {
+		.shift	= 30,
+		.enc	= BOOK3E_PAGESZ_1GB,
+	},
+};
+static inline int mmu_get_tsize(int psize)
+{
+	return mmu_psize_defs[psize].enc;
+}
+#else
+static inline int mmu_get_tsize(int psize)
+{
+	/* This isn't used on !Book3E for now */
+	return 0;
+}
+#endif
+
+/* The variables below are currently only used on 64-bit Book3E
+ * though this will probably be made common with other nohash
+ * implementations at some point
+ */
+#ifdef CONFIG_PPC64
+
+int mmu_linear_psize;		/* Page size used for the linear mapping */
+int mmu_pte_psize;		/* Page size used for PTE pages */
+int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
+unsigned long linear_map_top;	/* Top of linear mapping */
+
+#endif /* CONFIG_PPC64 */
+
 /*
  * Base TLB flushing operations:
  *
@@ -82,7 +140,7 @@ void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			       0 /* tsize unused for now */, 0);
+			       mmu_get_tsize(mmu_virtual_psize), 0);
 }
 EXPORT_SYMBOL(local_flush_tlb_page);
 
@@ -198,7 +256,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			 0 /* tsize unused for now */, 0);
+			 mmu_get_tsize(mmu_virtual_psize), 0);
 }
 EXPORT_SYMBOL(flush_tlb_page);
 
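Both hunks above replace the placeholder translation size with the real Book3E encoding of the kernel's base page size, looked up through the new mmu_psize_defs table. A minimal model of that lookup, assuming a 4K base page size; the encoding values below are stand-ins for the BOOK3E_PAGESZ_* constants, which live in the kernel headers rather than in this patch:

#include <stdio.h>

/* Stand-in page size indices and encodings; the real BOOK3E_PAGESZ_*
 * values come from the kernel headers, not from this patch. */
enum { MMU_PAGE_4K, MMU_PAGE_16K, MMU_PAGE_64K, MMU_PAGE_COUNT };
static const int psize_enc[MMU_PAGE_COUNT] = { 2, 4, 6 };

/* Mirrors the Book3E mmu_get_tsize(): a direct table lookup of the
 * hardware encoding (mmu_psize_defs[psize].enc in the patch). */
static int mmu_get_tsize(int psize)
{
	return psize_enc[psize];
}

int main(void)
{
	int mmu_virtual_psize = MMU_PAGE_4K;	/* assume 4K base pages */

	printf("tsize passed to the flush helpers: %d\n",
	       mmu_get_tsize(mmu_virtual_psize));
	return 0;
}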
@@ -241,3 +299,140 @@ void tlb_flush(struct mmu_gather *tlb)
 	/* Push out batch of freed page tables */
 	pte_free_finish();
 }
+
+/*
+ * Below are functions specific to the 64-bit variant of Book3E though that
+ * may change in the future
+ */
+
+#ifdef CONFIG_PPC64
+
+/*
+ * Handling of virtual linear page tables or indirect TLB entries
+ * flushing when PTE pages are freed
+ */
+void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+	int tsize = mmu_psize_defs[mmu_pte_psize].enc;
+
+	if (book3e_htw_enabled) {
+		unsigned long start = address & PMD_MASK;
+		unsigned long end = address + PMD_SIZE;
+		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
+
+		/* This isn't the most optimal, ideally we would factor out the
+		 * whole preempt & CPU mask mucking around, or even the IPI but
+		 * it will do for now
+		 */
+		while (start < end) {
+			__flush_tlb_page(tlb->mm, start, tsize, 1);
+			start += size;
+		}
+	} else {
+		unsigned long rmask = 0xf000000000000000ul;
+		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
+		unsigned long vpte = address & ~rmask;
+
+#ifdef CONFIG_PPC_64K_PAGES
+		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
+#else
+		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
+#endif
+		vpte |= rid;
+		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
+	}
+}
+
+/*
+ * Early initialization of the MMU TLB code
+ */
+static void __early_init_mmu(int boot_cpu)
+{
+	extern unsigned int interrupt_base_book3e;
+	extern unsigned int exc_data_tlb_miss_htw_book3e;
+	extern unsigned int exc_instruction_tlb_miss_htw_book3e;
+
+	unsigned int *ibase = &interrupt_base_book3e;
+	unsigned int mas4;
+
+	/* XXX This will have to be decided at runtime, but right
+	 * now our boot and TLB miss code hard wires it
+	 */
+	mmu_linear_psize = MMU_PAGE_1G;
+
+
+	/* Check if HW tablewalk is present, and if yes, enable it by:
+	 *
+	 * - patching the TLB miss handlers to branch to the
+	 *   one dedicated to it
+	 *
+	 * - setting the global book3e_htw_enabled
+	 *
+	 * - setting MAS4:INDD and the default page size
+	 */
+
+	/* XXX This code only checks for TLB 0 capabilities and doesn't
+	 * check what page size combos are supported by the HW. It
+	 * also doesn't handle the case where a separate array holds
+	 * the IND entries from the array loaded by the PT.
+	 */
+	if (boot_cpu) {
+		unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+
+		/* Check if HW loader is supported */
+		if ((tlb0cfg & TLBnCFG_IND) &&
+		    (tlb0cfg & TLBnCFG_PT)) {
+			patch_branch(ibase + (0x1c0 / 4),
+				     (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
+			patch_branch(ibase + (0x1e0 / 4),
+				     (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
+			book3e_htw_enabled = 1;
+		}
+		pr_info("MMU: Book3E Page Tables %s\n",
+			book3e_htw_enabled ? "Enabled" : "Disabled");
+	}
+
+	/* Set MAS4 based on page table setting */
+
+	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
+	if (book3e_htw_enabled) {
+		mas4 |= MAS4_INDD;
+#ifdef CONFIG_PPC_64K_PAGES
+		mas4 |=	BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
+		mmu_pte_psize = MMU_PAGE_256M;
+#else
+		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
+		mmu_pte_psize = MMU_PAGE_1M;
+#endif
+	} else {
+#ifdef CONFIG_PPC_64K_PAGES
+		mas4 |=	BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
+#else
+		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
+#endif
+		mmu_pte_psize = mmu_virtual_psize;
+	}
+	mtspr(SPRN_MAS4, mas4);
+
+	/* Set the global containing the top of the linear mapping
+	 * for use by the TLB miss code
+	 */
+	linear_map_top = lmb_end_of_DRAM();
+
+	/* A sync won't hurt us after mucking around with
+	 * the MMU configuration
+	 */
+	mb();
+}
+
+void __init early_init_mmu(void)
+{
+	__early_init_mmu(1);
+}
+
+void __cpuinit early_init_mmu_secondary(void)
+{
+	__early_init_mmu(0);
+}
+
+#endif /* CONFIG_PPC64 */
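The MAS4 programming at the end of __early_init_mmu() sets the defaults the TLB miss handlers rely on: the default translation size and, when hardware tablewalk is enabled, the indirect (INDD) attribute for default entries. A small standalone model of that composition under the softwalk, 4K-page assumptions; the MAS4_* field positions and the page-size encoding below are stand-ins, not the architected register layout:

#include <stdio.h>

/* Stand-in field positions; the architected MAS4 layout is defined in
 * the Book3E spec and the kernel's headers, not reproduced here. */
#define MAS4_WIMGED_SHIFT	0
#define MAS4_TSIZED_SHIFT	7
#define MAS4_INDD		(1u << 15)
#define BOOK3E_PAGESZ_4K	2	/* illustrative encoding */

int main(void)
{
	unsigned int mas4;
	int book3e_htw_enabled = 0;	/* model the softwalk path */

	/* default WIMGE = 0b00100, i.e. M (coherency) set, as in the patch */
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	if (book3e_htw_enabled)
		mas4 |= MAS4_INDD;	/* default entries are indirect */
	mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;

	printf("MAS4 default = 0x%08x\n", mas4);
	return 0;
}

On real hardware the composed value is then written with mtspr(SPRN_MAS4, mas4), exactly as the patch does before the final memory barrier.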