 include/linux/vmstat.h |  1 +
 mm/internal.h          | 17 +++++++++++++++++
 mm/page_alloc.c        |  1 +
 mm/vmstat.c            |  1 +
 4 files changed, 20 insertions(+), 0 deletions(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 05b805020be2..9cd3ab0f554d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -49,6 +49,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		UNEVICTABLE_PGMUNLOCKED,
 		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
 		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
+		UNEVICTABLE_MLOCKFREED,
 #endif
 		NR_VM_EVENT_ITEMS
 };
diff --git a/mm/internal.h b/mm/internal.h
index 1cfbf2e2bc9e..e4e728bdf324 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -146,6 +146,22 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
+/*
+ * free_page_mlock() -- clean up attempts to free an mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+	if (unlikely(TestClearPageMlocked(page))) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__dec_zone_page_state(page, NR_MLOCK);
+		__count_vm_event(UNEVICTABLE_MLOCKFREED);
+		local_irq_restore(flags);
+	}
+}
 
 #else /* CONFIG_UNEVICTABLE_LRU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
@@ -155,6 +171,7 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
+static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_UNEVICTABLE_LRU */
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5886586fde6c..cfbadad75d1d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -454,6 +454,7 @@ static inline void __free_one_page(struct page *page,
 
 static inline int free_pages_check(struct page *page)
 {
+	free_page_mlock(page);
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_get_page_cgroup(page) != NULL) |
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9e28abc0a0b9..9343227c5c60 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -689,6 +689,7 @@ static const char * const vmstat_text[] = {
 	"unevictable_pgs_munlocked",
 	"unevictable_pgs_cleared",
 	"unevictable_pgs_stranded",
+	"unevictable_pgs_mlockfreed",
#endif
#endif
 };
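
Once the vmstat_text[] entry above is in place, the new event shows up in /proc/vmstat like any other vm event counter. The following user-space sketch is illustrative only and not part of the patch; it assumes nothing beyond the standard "name value" line format of /proc/vmstat.

/*
 * Illustrative sketch, not part of the patch: scan /proc/vmstat and
 * print the unevictable_pgs_mlockfreed counter exported by the
 * vmstat_text[] entry added above.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *fp = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long long val;

	if (!fp) {
		perror("fopen /proc/vmstat");
		return 1;
	}
	while (fscanf(fp, "%63s %llu", name, &val) == 2) {
		if (!strcmp(name, "unevictable_pgs_mlockfreed"))
			printf("unevictable_pgs_mlockfreed %llu\n", val);
	}
	fclose(fp);
	return 0;
}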