| author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-07-23 19:15:28 -0400 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-08-19 20:24:56 -0400 |
| commit | c7cc58a1ad8dfe3c199d3b6ce50412b86dd3edaf (patch) | |
| tree | 1d1ded72de81743ddd1306677d64757136972402 /arch/powerpc/include/asm/tlb.h | |
| parent | cf54dc7cd4f9aab55cd3e1794b0b74c3c88cd1a0 (diff) | |
powerpc/mm: Rework & cleanup page table freeing code path
This patch originally just added a hook to the page table flushing path,
but pulling on that string brought out a whole bunch of issues, so it now
does that and more:

- We now make the RCU batching of page freeing SMP-only, as I believe it
  was intended initially. A few more things now compile to nothing on
  !CONFIG_SMP (see the illustrative sketch at the end of this page).
- Some macros are turned into functions, which forced me to move a few
  things out of line due to unsolvable include dependencies. It's probably
  better that way anyway; this isn't -that- critical a code path.
- 32-bit didn't call pte_free_finish() from tlb_flush(), which means it
  wouldn't push the batch out to RCU for delayed freeing once a bunch of
  page tables had been freed; they would just sit in the batch until it
  filled up (see the sketch after the sign-off).
64-bit BookE will use that hook to maintain the virtually linear
page tables or the indirect entries in the TLB when using the
HW loader.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
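
To make the third bullet concrete, here is a minimal sketch of the reworked
32-bit tlb_flush(). Only the added pte_free_finish() call is confirmed by the
commit message; the surrounding body (the hash-MMU check) is illustrative
context and may not match arch/powerpc/mm/tlb_hash32.c verbatim.

```c
/* Sketch only: 32-bit tlb_flush() after the rework. The pte_free_finish()
 * call is the fix described above; the rest is illustrative context.
 */
void tlb_flush(struct mmu_gather *tlb)
{
        /* 603-class cores have no hash table, so flush the whole TLB */
        if (Hash == 0)
                _tlbia();

        /* Previously missing on 32-bit: push the batch of freed page tables
         * out for RCU-delayed freeing instead of leaving it to sit until
         * the batch fills up.
         */
        pte_free_finish();
}
```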
Diffstat (limited to 'arch/powerpc/include/asm/tlb.h')
-rw-r--r-- | arch/powerpc/include/asm/tlb.h | 38 |
1 file changed, 3 insertions(+), 35 deletions(-)
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e20ff7541f36..e2b428b0f7ba 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -25,57 +25,25 @@
 
 #include <linux/pagemap.h>
 
-struct mmu_gather;
-
 #define tlb_start_vma(tlb, vma)        do { } while (0)
 #define tlb_end_vma(tlb, vma)          do { } while (0)
 
-#if !defined(CONFIG_PPC_STD_MMU)
-
-#define tlb_flush(tlb)                 flush_tlb_mm((tlb)->mm)
-
-#elif defined(__powerpc64__)
-
-extern void pte_free_finish(void);
-
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
-        struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
-
-        /* If there's a TLB batch pending, then we must flush it because the
-         * pages are going to be freed and we really don't want to have a CPU
-         * access a freed page because it has a stale TLB
-         */
-        if (tlbbatch->index)
-                __flush_tlb_pending(tlbbatch);
-
-        pte_free_finish();
-}
-
-#else
-
 extern void tlb_flush(struct mmu_gather *tlb);
 
-#endif
-
 /* Get the generic bits... */
 #include <asm-generic/tlb.h>
 
-#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address)      do { } while (0)
-
-#else
 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
                              unsigned long address);
 
 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
                                           unsigned long address)
 {
+#ifdef CONFIG_PPC_STD_MMU_32
         if (pte_val(*ptep) & _PAGE_HASHPTE)
                 flush_hash_entry(tlb->mm, ptep, address);
+#endif
 }
 
-#endif
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_TLB_H */
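
As a footnote to the first bullet: RCU batching can be made SMP-only because
on a uniprocessor no other CPU can still be walking (or holding stale TLB
entries into) a page table that is being freed. A hedged sketch of the
compile-time split follows, using hypothetical helper names rather than the
kernel's real ones.

```c
/* Illustrative sketch of the SMP-only RCU batching from the first bullet.
 * queue_pgtable_for_rcu() and pgtable_free_now() are hypothetical names,
 * not the kernel's actual helpers.
 */
#ifdef CONFIG_SMP
/* SMP: another CPU may still hold a stale TLB entry pointing into this
 * page table, so queue the page and free it only after an RCU grace
 * period has guaranteed all concurrent walkers are done.
 */
#define pgtable_free(table)     queue_pgtable_for_rcu(table)
#else
/* UP: nothing else can be walking these tables, so free immediately;
 * the batching machinery compiles to nothing.
 */
#define pgtable_free(table)     pgtable_free_now(table)
#endif
```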