Diffstat (limited to 'include/asm-arm26/tlb.h')
-rw-r--r--  include/asm-arm26/tlb.h  47
1 file changed, 16 insertions(+), 31 deletions(-)
diff --git a/include/asm-arm26/tlb.h b/include/asm-arm26/tlb.h
index 1316352a58f3..08ddd85b8d35 100644
--- a/include/asm-arm26/tlb.h
+++ b/include/asm-arm26/tlb.h
@@ -10,24 +10,20 @@
  */
 struct mmu_gather {
         struct mm_struct        *mm;
-        unsigned int            freed;
+        unsigned int            need_flush;
         unsigned int            fullmm;
-
-        unsigned int            flushes;
-        unsigned int            avoided_flushes;
 };
 
-extern struct mmu_gather mmu_gathers[NR_CPUS];
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-        int cpu = smp_processor_id();
-        struct mmu_gather *tlb = &mmu_gathers[cpu];
+        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
         tlb->mm = mm;
-        tlb->freed = 0;
+        tlb->need_flush = 0;
         tlb->fullmm = full_mm_flush;
 
         return tlb;
 }
@@ -35,30 +31,13 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-        struct mm_struct *mm = tlb->mm;
-        unsigned long freed = tlb->freed;
-        int rss = get_mm_counter(mm, rss);
-
-        if (rss < freed)
-                freed = rss;
-        add_mm_counter(mm, rss, -freed);
-
-        if (freed) {
-                flush_tlb_mm(mm);
-                tlb->flushes++;
-        } else {
-                tlb->avoided_flushes++;
-        }
+        if (tlb->need_flush)
+                flush_tlb_mm(tlb->mm);
 
         /* keep the page table cache within bounds */
         check_pgt_cache();
-}
-
-
-static inline unsigned int
-tlb_is_full_mm(struct mmu_gather *tlb)
-{
-        return tlb->fullmm;
-}
+
+        put_cpu_var(mmu_gathers);
+}
 
 #define tlb_remove_tlb_entry(tlb,ptep,address)  do { } while (0)
@@ -71,7 +50,13 @@ tlb_is_full_mm(struct mmu_gather *tlb)
         } while (0)
 #define tlb_end_vma(tlb,vma)                    do { } while (0)
 
-#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
+static inline void
+tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+        tlb->need_flush = 1;
+        free_page_and_swap_cache(page);
+}
+
 #define pte_free_tlb(tlb,ptep)          pte_free(ptep)
 #define pmd_free_tlb(tlb,pmdp)          pmd_free(pmdp)
 
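
For readers unfamiliar with the pattern this diff introduces: the per-CPU mmu_gather obtained with get_cpu_var() records, via the new need_flush flag, whether tlb_remove_page() freed anything, so tlb_finish_mmu() issues at most one flush_tlb_mm() and then releases the per-CPU variable with put_cpu_var(). The code below is a minimal, self-contained userspace sketch of that control flow only, not kernel code: struct mm_struct, struct page, flush_tlb_mm(), free_page_and_swap_cache(), the single mock gather variable and the unmap loop in main() are all stand-ins, and the start/end arguments of tlb_finish_mmu() are omitted.

/* Userspace mock of the deferred-flush bookkeeping shown above.
 * Only the control flow is real; every kernel facility is stubbed.
 */
#include <stdio.h>

struct mm_struct { int id; };
struct page { int pfn; };

struct mmu_gather {
        struct mm_struct *mm;
        unsigned int need_flush;        /* set once a page has been removed */
        unsigned int fullmm;
};

/* stand-in for DECLARE_PER_CPU()/get_cpu_var(): a single "CPU" here */
static struct mmu_gather mock_gather;

static void flush_tlb_mm(struct mm_struct *mm)           /* stub */
{
        printf("flush TLB for mm %d\n", mm->id);
}

static void free_page_and_swap_cache(struct page *page)  /* stub */
{
        printf("free page %d\n", page->pfn);
}

static struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int fullmm)
{
        struct mmu_gather *tlb = &mock_gather;  /* get_cpu_var() in the kernel */

        tlb->mm = mm;
        tlb->need_flush = 0;
        tlb->fullmm = fullmm;
        return tlb;
}

static void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;            /* remember that a flush is owed */
        free_page_and_swap_cache(page);
}

static void tlb_finish_mmu(struct mmu_gather *tlb)
{
        if (tlb->need_flush)            /* one flush covers every removal */
                flush_tlb_mm(tlb->mm);
        /* put_cpu_var() would release the per-CPU variable in the kernel */
}

int main(void)
{
        struct mm_struct mm = { .id = 1 };
        struct page pages[2] = { { .pfn = 10 }, { .pfn = 11 } };
        struct mmu_gather *tlb = tlb_gather_mmu(&mm, 0);
        int i;

        for (i = 0; i < 2; i++)
                tlb_remove_page(tlb, &pages[i]);
        tlb_finish_mmu(tlb);
        return 0;
}

Running the sketch prints the two page frees followed by a single TLB flush, which is the point of deferring the flush decision to tlb_finish_mmu().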