author     Hugh Dickins <hugh@veritas.com>        2005-10-29 21:16:03 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-30 00:40:37 -0400
commit     fc2acab31be8e869b2d5f6de12f557f6f054f19c (patch)
tree       60cf419f5e88c3c46d39675a14649ea1e5849f03 /include/asm-arm/tlb.h
parent     4d6ddfa9242bc3d27fb0f7248f6fdee0299c731f (diff)
[PATCH] mm: tlb_finish_mmu forget rss
zap_pte_range has been counting the pages it frees in tlb->freed, then
tlb_finish_mmu has used that to update the mm's rss. That got stranger when I
added anon_rss, yet updated it by a different route; and stranger when rss and
anon_rss became mm_counters with special access macros. And it would no
longer be viable if we're relying on page_table_lock to stabilize the
mm_counter, but calling tlb_finish_mmu outside that lock.
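(For context, the mm_counter access macros referred to here were thin wrappers in include/linux/sched.h; a sketch of them, roughly as in kernels of that period rather than the exact text:)

    /* Sketch of the era's mm_counter accessors (roughly as in
     * include/linux/sched.h; details may differ). */
    #define set_mm_counter(mm, member, value) (mm)->_##member = (value)
    #define get_mm_counter(mm, member) ((mm)->_##member)
    #define add_mm_counter(mm, member, value) (mm)->_##member += (value)
    #define inc_mm_counter(mm, member) (mm)->_##member++
    #define dec_mm_counter(mm, member) (mm)->_##member--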
Remove the mmu_gather's freed field, let tlb_finish_mmu stick to its own
business, just decrement the rss mm_counter in zap_pte_range (yes, there was
some point to batching the update, and a subsequent patch restores that). And
forget the anal paranoia of first reading the counter to avoid going negative
- if rss does go negative, just fix that bug.
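(The corresponding mm/memory.c change falls outside this diffstat-limited view; a condensed sketch of its shape, not the verbatim hunk:)

    /* Sketch only: zap_pte_range stops batching into tlb->freed and
     * decrements the mm_counters directly as each present page goes. */
    if (pte_present(ptent)) {
    	/* ... clear the pte and look up the page ... */
    	if (PageAnon(page))
    		dec_mm_counter(tlb->mm, anon_rss);
    	dec_mm_counter(tlb->mm, rss);	/* replaces tlb->freed++ */
    	/* ... dirty/young bookkeeping, then tlb_remove_page(tlb, page) ... */
    }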
Remove the mmu_gather's flushes and avoided_flushes from arm and arm26: no use
was being made of them. But arm26 alone was actually using the freed, in the
way some others use need_flush: give it a need_flush. arm26 seems to prefer
spaces to tabs here: respect that.
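(The arm26 half of the patch is likewise outside this view; assuming it follows the usual need_flush idiom described above, it would read roughly like this, with spaces rather than tabs per the note. A sketch, not quoted from the tree:)

    /* Sketch: arm26's freed becomes a need_flush flag, set where
     * pages are removed and tested when the gather finishes. */
    struct mmu_gather {
            struct mm_struct        *mm;
            unsigned int            need_flush;
            unsigned int            fullmm;
    };

    static inline void
    tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
    {
            if (tlb->need_flush)
                    flush_tlb_mm(tlb->mm);

            /* keep the page table cache within bounds */
            check_pgt_cache();
            put_cpu_var(mmu_gathers);
    }

    #define tlb_remove_page(tlb, page) do {                 \
                    (tlb)->need_flush = 1;                  \
                    free_page_and_swap_cache(page);         \
            } while (0)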
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-arm/tlb.h')
-rw-r--r--  include/asm-arm/tlb.h | 15 +--------------
1 file changed, 1 insertion(+), 14 deletions(-)
diff --git a/include/asm-arm/tlb.h b/include/asm-arm/tlb.h
index a35ab0f2e25e..f49bfb78c221 100644
--- a/include/asm-arm/tlb.h
+++ b/include/asm-arm/tlb.h
@@ -27,11 +27,7 @@
  */
 struct mmu_gather {
 	struct mm_struct	*mm;
-	unsigned int		freed;
 	unsigned int		fullmm;
-
-	unsigned int		flushes;
-	unsigned int		avoided_flushes;
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -42,7 +38,6 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
-	tlb->freed = 0;
 	tlb->fullmm = full_mm_flush;
 
 	return tlb;
@@ -51,16 +46,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	struct mm_struct *mm = tlb->mm;
-	unsigned long freed = tlb->freed;
-	int rss = get_mm_counter(mm, rss);
-
-	if (rss < freed)
-		freed = rss;
-	add_mm_counter(mm, rss, -freed);
-
 	if (tlb->fullmm)
-		flush_tlb_mm(mm);
+		flush_tlb_mm(tlb->mm);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();