path: root/mm
author	Lee Schermerhorn <lee.schermerhorn@hp.com>	2008-10-18 23:26:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 11:52:31 -0400
commit	985737cf2ea096ea946aed82c7484d40defc71a8 (patch)
tree	b96dc3b8c28f743857a7a1fff25472d6e0f60120 /mm
parent	af936a1606246a10c145feac3770f6287f483f02 (diff)
mlock: count attempts to free mlocked page
Allow freeing of mlock()ed pages.  This shouldn't happen, but during development it occasionally did.  This patch allows us to survive that condition, while keeping the statistics and events correct for debug.

Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
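For background, a minimal userspace sketch (not part of this patch) of how a page becomes mlocked in the first place: mlock(2) marks the backing pages PageMlocked and accounts them in NR_MLOCK, and munlock(2) is the normal path that clears that state before the pages can return to the allocator.  A page reaching the page allocator while still PageMlocked is the "shouldn't happen" case the new counter tracks.

/* Minimal sketch, not part of this patch: exercise the normal
 * mlock/munlock path.  mlock(2) marks the backing page PageMlocked
 * and accounts it in NR_MLOCK; munlock(2) clears that state. */
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	void *buf = NULL;

	if (posix_memalign(&buf, pagesz, pagesz) != 0)
		return 1;
	if (mlock(buf, pagesz) != 0)	/* page becomes PageMlocked */
		return 1;
	munlock(buf, pagesz);		/* normal path: unlock before free */
	free(buf);
	return 0;
}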
Diffstat (limited to 'mm')
-rw-r--r--	mm/internal.h	17
-rw-r--r--	mm/page_alloc.c	1
-rw-r--r--	mm/vmstat.c	1
3 files changed, 19 insertions(+), 0 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 1cfbf2e2bc9e..e4e728bdf324 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -146,6 +146,22 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
+/*
+ * free_page_mlock() -- clean up attempts to free an mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+	if (unlikely(TestClearPageMlocked(page))) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__dec_zone_page_state(page, NR_MLOCK);
+		__count_vm_event(UNEVICTABLE_MLOCKFREED);
+		local_irq_restore(flags);
+	}
+}
 
 #else /* CONFIG_UNEVICTABLE_LRU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
@@ -155,6 +171,7 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
+static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_UNEVICTABLE_LRU */
 
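A note on the hunk above: __dec_zone_page_state() and __count_vm_event() are the non-irq-safe counter variants, and free_page_mlock() can be reached with interrupts enabled, hence the local_irq_save()/local_irq_restore() bracket around the updates.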
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5886586fde6c..cfbadad75d1d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -454,6 +454,7 @@ static inline void __free_one_page(struct page *page,
 
 static inline int free_pages_check(struct page *page)
 {
+	free_page_mlock(page);
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_get_page_cgroup(page) != NULL) |
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9e28abc0a0b9..9343227c5c60 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -689,6 +689,7 @@ static const char * const vmstat_text[] = {
689 "unevictable_pgs_munlocked", 689 "unevictable_pgs_munlocked",
690 "unevictable_pgs_cleared", 690 "unevictable_pgs_cleared",
691 "unevictable_pgs_stranded", 691 "unevictable_pgs_stranded",
692 "unevictable_pgs_mlockfreed",
692#endif 693#endif
693#endif 694#endif
694}; 695};
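The new event name surfaces through /proc/vmstat along with the other vmstat_text entries.  As a hypothetical debug check (assuming a kernel built with CONFIG_UNEVICTABLE_LRU and CONFIG_VM_EVENT_COUNTERS), the counter could be read from userspace like this:

/* Hypothetical debug helper, not part of this patch: scan /proc/vmstat
 * for the event counter added above.  Each vmstat_text entry appears
 * there as "name value" on its own line. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "unevictable_pgs_mlockfreed", 26) == 0)
			fputs(line, stdout);	/* e.g. "unevictable_pgs_mlockfreed 0" */
	}
	fclose(f);
	return 0;
}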