author     Jeff Dike <jdike@addtoit.com>                          2008-02-05 01:31:07 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-05 12:44:29 -0500
commit     0b4e273fb83bce5dd8e166a4defb16ebdd215abf
tree       18f30fe9092dacf9a7e3474a9b0d692d91962242 /include/asm-um
parent     909e90d3c410b684e564729145f7c20dad887757
uml: customize tlb.h
Customize the hooks in tlb.h to optimize TLB flushing some more.

Add start and end fields to struct mmu_gather, which are used to limit
the address space range scanned when a region is unmapped.

The interfaces which just free page tables, without actually changing
mappings, don't need to cause a TLB flush.
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-um')
-rw-r--r--  include/asm-um/tlb.h | 122
1 file changed, 121 insertions(+), 1 deletion(-)
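The effect of the new range tracking is easiest to see in isolation. Below is a minimal standalone userspace sketch (not part of the patch, and not kernel code): struct mock_gather and the PAGE_SIZE/TASK_SIZE values are stand-ins invented for illustration, but the start/end/need_flush logic mirrors the hooks added in the diff that follows. Unmapping two nearby pages produces one narrow flush range, and a pass that only frees page tables leaves need_flush at 0, so no flush is issued at all.

/*
 * Standalone userspace sketch -- NOT kernel code and not part of this patch.
 * PAGE_SIZE, TASK_SIZE and struct mock_gather are local stand-ins chosen
 * for illustration; the logic mirrors the mmu_gather hooks added below.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TASK_SIZE 0xc0000000UL          /* arbitrary stand-in for the real limit */

struct mock_gather {
        unsigned long start;
        unsigned long end;
        int need_flush;
        int fullmm;
};

static void init_gather(struct mock_gather *tlb)
{
        tlb->need_flush = 0;
        /* Empty range: start above end, until a pte is really unmapped. */
        tlb->start = TASK_SIZE;
        tlb->end = 0;
        if (tlb->fullmm) {
                tlb->start = 0;
                tlb->end = TASK_SIZE;
        }
}

static void remove_tlb_entry(struct mock_gather *tlb, unsigned long address)
{
        /* Mirrors tlb_remove_tlb_entry(): mark dirty and grow the range. */
        tlb->need_flush = 1;
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}

static void flush_mmu(struct mock_gather *tlb)
{
        /* Mirrors tlb_flush_mmu(): skip the flush if nothing was unmapped. */
        if (!tlb->need_flush) {
                printf("no ptes unmapped -> no TLB flush needed\n");
                return;
        }
        printf("flush 0x%lx - 0x%lx only\n", tlb->start, tlb->end);
        init_gather(tlb);
}

int main(void)
{
        struct mock_gather tlb = { .fullmm = 0 };

        /* Unmap two nearby pages: only their enclosing range is flushed. */
        init_gather(&tlb);
        remove_tlb_entry(&tlb, 0x400000);
        remove_tlb_entry(&tlb, 0x402000);
        flush_mmu(&tlb);                /* prints 0x400000 - 0x403000 */

        /* A pass that only frees page tables never sets need_flush. */
        init_gather(&tlb);
        flush_mmu(&tlb);                /* prints that no flush is needed */
        return 0;
}

Compiled with any C compiler, the first pass prints a flush of 0x400000-0x403000 rather than the whole address space, and the second prints that no flush is needed, which is the behaviour described in the commit message.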
diff --git a/include/asm-um/tlb.h b/include/asm-um/tlb.h
index c640033bc1fd..39fc475df6c9 100644
--- a/include/asm-um/tlb.h
+++ b/include/asm-um/tlb.h
@@ -1,6 +1,126 @@
 #ifndef __UM_TLB_H
 #define __UM_TLB_H
 
-#include <asm/arch/tlb.h>
+#include <linux/swap.h>
+#include <asm/percpu.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+struct mmu_gather {
+        struct mm_struct *mm;
+        unsigned int need_flush; /* Really unmapped some ptes? */
+        unsigned long start;
+        unsigned long end;
+        unsigned int fullmm; /* non-zero means full mm flush */
+};
+
+/* Users of the generic TLB shootdown code must declare this storage space. */
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+                                          unsigned long address)
+{
+        if (tlb->start > address)
+                tlb->start = address;
+        if (tlb->end < address + PAGE_SIZE)
+                tlb->end = address + PAGE_SIZE;
+}
+
+static inline void init_tlb_gather(struct mmu_gather *tlb)
+{
+        tlb->need_flush = 0;
+
+        tlb->start = TASK_SIZE;
+        tlb->end = 0;
+
+        if (tlb->fullmm) {
+                tlb->start = 0;
+                tlb->end = TASK_SIZE;
+        }
+}
+
+/* tlb_gather_mmu
+ *      Return a pointer to an initialized struct mmu_gather.
+ */
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+        tlb->mm = mm;
+        tlb->fullmm = full_mm_flush;
+
+        init_tlb_gather(tlb);
+
+        return tlb;
+}
+
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+                               unsigned long end);
+
+static inline void
+tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+        if (!tlb->need_flush)
+                return;
+
+        flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+        init_tlb_gather(tlb);
+}
+
+/* tlb_finish_mmu
+ *      Called at the end of the shootdown operation to free up any resources
+ *      that were required.
+ */
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+        tlb_flush_mmu(tlb, start, end);
+
+        /* keep the page table cache within bounds */
+        check_pgt_cache();
+
+        put_cpu_var(mmu_gathers);
+}
+
+/* tlb_remove_page
+ *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
+ *      while handling the additional races in SMP caused by other CPUs
+ *      caching valid mappings in their TLBs.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+        tlb->need_flush = 1;
+        free_page_and_swap_cache(page);
+        return;
+}
+
+/**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+ * Record the fact that pte's were really umapped in ->need_flush, so we can
+ * later optimise away the tlb invalidate.   This helps when userspace is
+ * unmapping already-unmapped pages, which happens quite a lot.
+ */
+#define tlb_remove_tlb_entry(tlb, ptep, address)                \
+        do {                                                    \
+                tlb->need_flush = 1;                            \
+                __tlb_remove_tlb_entry(tlb, ptep, address);     \
+        } while (0)
+
+#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
+
+#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
+
+#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
+
+#define tlb_migrate_finish(mm) do {} while (0)
 
 #endif