author     Hugh Dickins <hugh@veritas.com>        2005-10-29 21:16:03 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-30 00:40:37 -0400
commit     fc2acab31be8e869b2d5f6de12f557f6f054f19c (patch)
tree       60cf419f5e88c3c46d39675a14649ea1e5849f03 /include/asm-arm26
parent     4d6ddfa9242bc3d27fb0f7248f6fdee0299c731f (diff)
[PATCH] mm: tlb_finish_mmu forget rss
zap_pte_range has been counting the pages it frees in tlb->freed, then tlb_finish_mmu has used that to update the mm's rss. That got stranger when I added anon_rss, yet updated it by a different route; and stranger when rss and anon_rss became mm_counters with special access macros. And it would no longer be viable if we're relying on page_table_lock to stabilize the mm_counter, but calling tlb_finish_mmu outside that lock.

Remove the mmu_gather's freed field, let tlb_finish_mmu stick to its own business, just decrement the rss mm_counter in zap_pte_range (yes, there was some point to batching the update, and a subsequent patch restores that). And forget the anal paranoia of first reading the counter to avoid going negative - if rss does go negative, just fix that bug.

Remove the mmu_gather's flushes and avoided_flushes from arm and arm26: no use was being made of them. But arm26 alone was actually using the freed, in the way some others use need_flush: give it a need_flush. arm26 seems to prefer spaces to tabs here: respect that.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
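In effect, the per-page accounting this patch takes out of tlb_finish_mmu moves into the unmapping loop itself: rss is decremented as each page is freed, and the mmu_gather only records whether a TLB flush is needed. A minimal sketch of that pattern, using add_mm_counter and tlb_remove_page as they appear in this tree (the helper name sketch_zap_one_page is invented here for illustration and is not part of the patch):

/* Illustrative only -- not the actual zap_pte_range change. */
static void sketch_zap_one_page(struct mmu_gather *tlb, struct mm_struct *mm,
                                struct page *page)
{
        /* decrement rss as the page goes, instead of batching via tlb->freed */
        add_mm_counter(mm, rss, -1);
        /* on arm26 this now sets tlb->need_flush, then frees page + swap cache */
        tlb_remove_page(tlb, page);
}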
Diffstat (limited to 'include/asm-arm26')
-rw-r--r--  include/asm-arm26/tlb.h  35
1 file changed, 13 insertions(+), 22 deletions(-)
diff --git a/include/asm-arm26/tlb.h b/include/asm-arm26/tlb.h
index c7d54ca0a239..08ddd85b8d35 100644
--- a/include/asm-arm26/tlb.h
+++ b/include/asm-arm26/tlb.h
@@ -10,11 +10,8 @@
  */
 struct mmu_gather {
         struct mm_struct *mm;
-        unsigned int freed;
+        unsigned int need_flush;
         unsigned int fullmm;
-
-        unsigned int flushes;
-        unsigned int avoided_flushes;
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -25,8 +22,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
         struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
         tlb->mm = mm;
-        tlb->freed = 0;
+        tlb->need_flush = 0;
         tlb->fullmm = full_mm_flush;
 
         return tlb;
 }
@@ -34,20 +31,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-        struct mm_struct *mm = tlb->mm;
-        unsigned long freed = tlb->freed;
-        int rss = get_mm_counter(mm, rss);
-
-        if (rss < freed)
-                freed = rss;
-        add_mm_counter(mm, rss, -freed);
-
-        if (freed) {
-                flush_tlb_mm(mm);
-                tlb->flushes++;
-        } else {
-                tlb->avoided_flushes++;
-        }
+        if (tlb->need_flush)
+                flush_tlb_mm(tlb->mm);
 
         /* keep the page table cache within bounds */
         check_pgt_cache();
@@ -65,7 +50,13 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
         } while (0)
 #define tlb_end_vma(tlb,vma)            do { } while (0)
 
-#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
+static inline void
+tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+        tlb->need_flush = 1;
+        free_page_and_swap_cache(page);
+}
+
 #define pte_free_tlb(tlb,ptep)          pte_free(ptep)
 #define pmd_free_tlb(tlb,pmdp)          pmd_free(pmdp)
 
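For context, the calling sequence this header serves is the usual mmu_gather one: gather, remove pages, finish. A rough sketch under that assumption (the caller name sketch_unmap_range is illustrative and not part of the patch; only tlb_gather_mmu, tlb_start_vma, tlb_end_vma, tlb_remove_page and tlb_finish_mmu come from the file above):

/* Illustrative caller, not from the patch. */
static void sketch_unmap_range(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long start, unsigned long end)
{
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);  /* need_flush starts at 0 */

        tlb_start_vma(tlb, vma);
        /* ... clear ptes; each freed page goes through tlb_remove_page(tlb, page),
           which sets need_flush ... */
        tlb_end_vma(tlb, vma);

        tlb_finish_mmu(tlb, start, end);  /* flush_tlb_mm only if need_flush was set */
}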