Diffstat (limited to 'include/asm-ia64/tlb.h')
 -rw-r--r--  include/asm-ia64/tlb.h | 19
 1 file changed, 3 insertions(+), 16 deletions(-)
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index 3a9a6d1be75c..834370b9dea1 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -60,7 +60,6 @@ struct mmu_gather {
 	unsigned int		nr;		/* == ~0U => fast mode */
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
-	unsigned long		freed;		/* number of pages freed */
 	unsigned long		start_addr;
 	unsigned long		end_addr;
 	struct page		*pages[FREE_PTE_NR];
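
The net effect of this hunk is the loss of the per-gather freed counter. For reference, a sketch of the structure as it stands after the change; the mm pointer is assumed from the tlb->mm = mm assignment later in this diff, everything else is visible in the hunk above, and FREE_PTE_NR is defined elsewhere in this header:

/* Sketch of struct mmu_gather after this change (not verbatim: the mm
 * member is inferred from tlb->mm = mm below). */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* == ~0U => fast mode */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		*pages[FREE_PTE_NR];
};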
@@ -129,7 +128,7 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 static inline struct mmu_gather *
 tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers);
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
 	/*
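
A note on the change above: unlike __get_cpu_var(), get_cpu_var() disables preemption before handing back the per-CPU variable, so the gather can no longer migrate to another CPU mid-shootdown; every get_cpu_var() must be balanced by a put_cpu_var(), which is what tlb_finish_mmu() gains at the end of this diff. A minimal sketch of the pattern, assuming the DEFINE_PER_CPU declaration that mmu_gathers gets elsewhere in the tree; example_grab_gather() is a hypothetical helper for illustration only:

#include <linux/percpu.h>

static DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

static inline struct mmu_gather *
example_grab_gather(void)
{
	/* get_cpu_var() disables preemption and returns this CPU's copy */
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	return tlb;	/* caller must later call put_cpu_var(mmu_gathers) */
}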
@@ -147,25 +146,17 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 	 */
 	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
 	tlb->fullmm = full_mm_flush;
-	tlb->freed = 0;
 	tlb->start_addr = ~0UL;
 	return tlb;
 }
 
 /*
  * Called at the end of the shootdown operation to free up any resources that were
- * collected.  The page table lock is still held at this point.
+ * collected.
  */
 static inline void
 tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	unsigned long freed = tlb->freed;
-	struct mm_struct *mm = tlb->mm;
-	unsigned long rss = get_mm_counter(mm, rss);
-
-	if (rss < freed)
-		freed = rss;
-	add_mm_counter(mm, rss, -freed);
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
 	 * tlb->end_addr.
@@ -174,12 +165,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-}
 
-static inline unsigned int
-tlb_is_full_mm(struct mmu_gather *tlb)
-{
-	return tlb->fullmm;
+	put_cpu_var(mmu_gathers);
 }
 
 /*
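
For context, the shootdown interface this header implements is used by the core unmap path roughly as follows. This is a schematic sketch, not verbatim kernel code: example_shootdown() is a hypothetical wrapper, while tlb_gather_mmu(), tlb_remove_page() and tlb_finish_mmu() are the real entry points:

static void
example_shootdown(struct mm_struct *mm, struct page *page,
		  unsigned long start, unsigned long end)
{
	/* grab this CPU's gather; after this diff, preemption is now disabled */
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

	/* each page whose PTE is cleared is handed to the gather */
	tlb_remove_page(tlb, page);

	/* flush the TLB and free the gathered pages; after this diff,
	 * put_cpu_var() re-enables preemption here */
	tlb_finish_mmu(tlb, start, end);
}

Pairing get_cpu_var() in tlb_gather_mmu() with put_cpu_var() in tlb_finish_mmu() keeps the gather pinned to one CPU for the whole operation, closing the window where a preemption-driven migration could leave it referenced from the wrong CPU.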