about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2008-10-18 23:26:51 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-10-20 11:52:31 -0400
commit5344b7e648980cc2ca613ec03a56a8222ff48820 (patch)
treef9f8773ae8e38fb91aec52ca9ad2bd81f039b565 /mm
parentba470de43188cdbff795b5da43a1474523c6c2fb (diff)
vmstat: mlocked pages statistics
Add NR_MLOCK zone page state, which provides a (conservative) count of mlocked pages (actually, the number of mlocked pages moved off the LRU). Reworked by lts to fit in with the modified mlock page support in the Reclaim Scalability series. [kosaki.motohiro@jp.fujitsu.com: fix incorrect Mlocked field of /proc/meminfo] [lee.schermerhorn@hp.com: mlocked-pages: add event counting with statistics] Signed-off-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com> Signed-off-by: Rik van Riel <riel@redhat.com> Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/internal.h16
-rw-r--r--mm/mlock.c41
-rw-r--r--mm/vmstat.c5
3 files changed, 54 insertions, 8 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 48e32f790571..1cfbf2e2bc9e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -101,7 +101,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
101 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) 101 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
102 return 0; 102 return 0;
103 103
104 SetPageMlocked(page); 104 if (!TestSetPageMlocked(page)) {
105 inc_zone_page_state(page, NR_MLOCK);
106 count_vm_event(UNEVICTABLE_PGMLOCKED);
107 }
105 return 1; 108 return 1;
106} 109}
107 110
@@ -128,12 +131,19 @@ static inline void clear_page_mlock(struct page *page)
128 131
129/* 132/*
130 * mlock_migrate_page - called only from migrate_page_copy() to 133 * mlock_migrate_page - called only from migrate_page_copy() to
131 * migrate the Mlocked page flag 134 * migrate the Mlocked page flag; update statistics.
132 */ 135 */
133static inline void mlock_migrate_page(struct page *newpage, struct page *page) 136static inline void mlock_migrate_page(struct page *newpage, struct page *page)
134{ 137{
135 if (TestClearPageMlocked(page)) 138 if (TestClearPageMlocked(page)) {
139 unsigned long flags;
140
141 local_irq_save(flags);
142 __dec_zone_page_state(page, NR_MLOCK);
136 SetPageMlocked(newpage); 143 SetPageMlocked(newpage);
144 __inc_zone_page_state(newpage, NR_MLOCK);
145 local_irq_restore(flags);
146 }
137} 147}
138 148
139 149
diff --git a/mm/mlock.c b/mm/mlock.c
index 8b478350a2a1..bce1b22c36c2 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -60,6 +60,8 @@ void __clear_page_mlock(struct page *page)
60 return; 60 return;
61 } 61 }
62 62
63 dec_zone_page_state(page, NR_MLOCK);
64 count_vm_event(UNEVICTABLE_PGCLEARED);
63 if (!isolate_lru_page(page)) { 65 if (!isolate_lru_page(page)) {
64 putback_lru_page(page); 66 putback_lru_page(page);
65 } else { 67 } else {
@@ -69,6 +71,9 @@ void __clear_page_mlock(struct page *page)
69 lru_add_drain_all(); 71 lru_add_drain_all();
70 if (!isolate_lru_page(page)) 72 if (!isolate_lru_page(page))
71 putback_lru_page(page); 73 putback_lru_page(page);
74 else if (PageUnevictable(page))
75 count_vm_event(UNEVICTABLE_PGSTRANDED);
76
72 } 77 }
73} 78}
74 79
@@ -80,8 +85,12 @@ void mlock_vma_page(struct page *page)
80{ 85{
81 BUG_ON(!PageLocked(page)); 86 BUG_ON(!PageLocked(page));
82 87
83 if (!TestSetPageMlocked(page) && !isolate_lru_page(page)) 88 if (!TestSetPageMlocked(page)) {
84 putback_lru_page(page); 89 inc_zone_page_state(page, NR_MLOCK);
90 count_vm_event(UNEVICTABLE_PGMLOCKED);
91 if (!isolate_lru_page(page))
92 putback_lru_page(page);
93 }
85} 94}
86 95
87/* 96/*
@@ -106,9 +115,31 @@ static void munlock_vma_page(struct page *page)
106{ 115{
107 BUG_ON(!PageLocked(page)); 116 BUG_ON(!PageLocked(page));
108 117
109 if (TestClearPageMlocked(page) && !isolate_lru_page(page)) { 118 if (TestClearPageMlocked(page)) {
110 try_to_munlock(page); 119 dec_zone_page_state(page, NR_MLOCK);
111 putback_lru_page(page); 120 if (!isolate_lru_page(page)) {
121 int ret = try_to_munlock(page);
122 /*
123 * did try_to_munlock() succeed or punt?
124 */
125 if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
126 count_vm_event(UNEVICTABLE_PGMUNLOCKED);
127
128 putback_lru_page(page);
129 } else {
130 /*
131 * We lost the race. Let try_to_unmap() deal
132 * with it. At least we get the page state and
133 * mlock stats right. However, page is still on
134 * the noreclaim list. We'll fix that up when
135 * the page is eventually freed or we scan the
136 * noreclaim list.
137 */
138 if (PageUnevictable(page))
139 count_vm_event(UNEVICTABLE_PGSTRANDED);
140 else
141 count_vm_event(UNEVICTABLE_PGMUNLOCKED);
142 }
112 } 143 }
113} 144}
114 145
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6db2f6319313..9e28abc0a0b9 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -625,6 +625,7 @@ static const char * const vmstat_text[] = {
625 "nr_active_file", 625 "nr_active_file",
626#ifdef CONFIG_UNEVICTABLE_LRU 626#ifdef CONFIG_UNEVICTABLE_LRU
627 "nr_unevictable", 627 "nr_unevictable",
628 "nr_mlock",
628#endif 629#endif
629 "nr_anon_pages", 630 "nr_anon_pages",
630 "nr_mapped", 631 "nr_mapped",
@@ -684,6 +685,10 @@ static const char * const vmstat_text[] = {
684 "unevictable_pgs_culled", 685 "unevictable_pgs_culled",
685 "unevictable_pgs_scanned", 686 "unevictable_pgs_scanned",
686 "unevictable_pgs_rescued", 687 "unevictable_pgs_rescued",
688 "unevictable_pgs_mlocked",
689 "unevictable_pgs_munlocked",
690 "unevictable_pgs_cleared",
691 "unevictable_pgs_stranded",
687#endif 692#endif
688#endif 693#endif
689}; 694};