Diffstat (limited to 'include/asm-um/tlb.h')
-rw-r--r-- | include/asm-um/tlb.h | 127
1 file changed, 0 insertions(+), 127 deletions(-)
diff --git a/include/asm-um/tlb.h b/include/asm-um/tlb.h
deleted file mode 100644
index 5240fa1c5e08..000000000000
--- a/include/asm-um/tlb.h
+++ /dev/null
@@ -1,127 +0,0 @@
-#ifndef __UM_TLB_H
-#define __UM_TLB_H
-
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/percpu.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
- */
-struct mmu_gather {
-	struct mm_struct	*mm;
-	unsigned int		need_flush; /* Really unmapped some ptes? */
-	unsigned long		start;
-	unsigned long		end;
-	unsigned int		fullmm; /* non-zero means full mm flush */
-};
-
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
-					  unsigned long address)
-{
-	if (tlb->start > address)
-		tlb->start = address;
-	if (tlb->end < address + PAGE_SIZE)
-		tlb->end = address + PAGE_SIZE;
-}
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
-	tlb->need_flush = 0;
-
-	tlb->start = TASK_SIZE;
-	tlb->end = 0;
-
-	if (tlb->fullmm) {
-		tlb->start = 0;
-		tlb->end = TASK_SIZE;
-	}
-}
-
-/* tlb_gather_mmu
- *	Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
-	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
-
-	init_tlb_gather(tlb);
-
-	return tlb;
-}
-
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-			       unsigned long end);
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	if (!tlb->need_flush)
-		return;
-
-	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-	init_tlb_gather(tlb);
-}
-
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	tlb_flush_mmu(tlb, start, end);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
-}
-
-/* tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- *	while handling the additional races in SMP caused by other CPUs
- *	caching valid mappings in their TLBs.
- */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	tlb->need_flush = 1;
-	free_page_and_swap_cache(page);
-	return;
-}
-
-/**
- * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- *
- * Record the fact that PTEs were really unmapped in ->need_flush, so we can
- * later optimise away the tlb invalidate.  This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
- */
-#define tlb_remove_tlb_entry(tlb, ptep, address)		\
-	do {							\
-		tlb->need_flush = 1;				\
-		__tlb_remove_tlb_entry(tlb, ptep, address);	\
-	} while (0)
-
-#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
-
-#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
-
-#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
-
-#define tlb_migrate_finish(mm) do {} while (0)
-
-#endif
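
For context (not part of the patch): a minimal sketch, in the same kernel C, of how the generic mm code was expected to drive the gather interface defined in the deleted header, assuming a simple per-page unmap loop. example_unmap_range() and example_pte_lookup() are hypothetical names introduced here for illustration; only tlb_gather_mmu(), tlb_remove_tlb_entry(), tlb_remove_page() and tlb_finish_mmu() come from the header above.

/*
 * Hypothetical usage sketch, not taken from this patch: tear down the
 * PTEs in [start, end), remembering each real unmap in the per-CPU
 * mmu_gather so the TLB is invalidated once at the end.
 * example_pte_lookup() stands in for the page-table walk.
 */
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);	/* 0: not a full-mm flush */
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t *ptep = example_pte_lookup(mm, addr);	/* hypothetical helper */
		pte_t pte;

		if (!ptep || pte_none(*ptep))
			continue;

		/* Clear the PTE, then record the unmap: this sets
		 * ->need_flush and widens tlb->start/tlb->end. */
		pte = ptep_get_and_clear(mm, addr, ptep);
		tlb_remove_tlb_entry(tlb, ptep, addr);
		tlb_remove_page(tlb, pte_page(pte));
	}

	/* Flushes the accumulated range via flush_tlb_mm_range() and
	 * releases the per-CPU mmu_gather. */
	tlb_finish_mmu(tlb, start, end);
}

The point of the ->need_flush / start / end bookkeeping is visible in such a loop: if no live PTE is found, tlb_finish_mmu() skips the flush entirely, and otherwise only the range actually touched is invalidated.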