diff options
-rw-r--r-- | mm/internal.h | 7 | ||||
-rw-r--r-- | mm/rmap.c | 11 | ||||
-rw-r--r-- | mm/vmstat.c | 4 |
3 files changed, 20 insertions, 2 deletions
diff --git a/mm/internal.h b/mm/internal.h index a25424a24e0c..e067984bafa0 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
@@ -201,7 +201,12 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma, | |||
201 | return 0; | 201 | return 0; |
202 | 202 | ||
203 | if (!TestSetPageMlocked(page)) { | 203 | if (!TestSetPageMlocked(page)) { |
204 | mod_zone_page_state(page_zone(page), NR_MLOCK, | 204 | /* |
205 | * We use the irq-unsafe __mod_zone_page_state because this | ||
206 | * counter is not modified from interrupt context, and the pte | ||
207 | * lock is held (a spinlock), which implies preemption disabled. | ||
208 | */ | ||
209 | __mod_zone_page_state(page_zone(page), NR_MLOCK, | ||
205 | hpage_nr_pages(page)); | 210 | hpage_nr_pages(page)); |
206 | count_vm_event(UNEVICTABLE_PGMLOCKED); | 211 | count_vm_event(UNEVICTABLE_PGMLOCKED); |
207 | } | 212 | } |
@@ -988,6 +988,12 @@ void do_page_add_anon_rmap(struct page *page, | |||
988 | { | 988 | { |
989 | int first = atomic_inc_and_test(&page->_mapcount); | 989 | int first = atomic_inc_and_test(&page->_mapcount); |
990 | if (first) { | 990 | if (first) { |
991 | /* | ||
992 | * We use the irq-unsafe __{inc|mod}_zone_page_state because | ||
993 | * these counters are not modified in interrupt context, and | ||
994 | * pte lock (a spinlock) is held, which implies preemption | ||
995 | * disabled. | ||
996 | */ | ||
991 | if (PageTransHuge(page)) | 997 | if (PageTransHuge(page)) |
992 | __inc_zone_page_state(page, | 998 | __inc_zone_page_state(page, |
993 | NR_ANON_TRANSPARENT_HUGEPAGES); | 999 | NR_ANON_TRANSPARENT_HUGEPAGES); |
@@ -1079,6 +1085,10 @@ void page_remove_rmap(struct page *page) | |||
1079 | /* | 1085 | /* |
1080 | * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED | 1086 | * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED |
1081 | * and not charged by memcg for now. | 1087 | * and not charged by memcg for now. |
1088 | * | ||
1089 | * We use the irq-unsafe __{inc|mod}_zone_page_state because | ||
1090 | * these counters are not modified in interrupt context, and | ||
1091 | * pte lock (a spinlock) is held, which implies preemption disabled. | ||
1082 | */ | 1092 | */ |
1083 | if (unlikely(PageHuge(page))) | 1093 | if (unlikely(PageHuge(page))) |
1084 | goto out; | 1094 | goto out; |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 376bd2d21482..b37bd49bfd55 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -207,7 +207,9 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat, | |||
207 | } | 207 | } |
208 | 208 | ||
209 | /* | 209 | /* |
210 | * For use when we know that interrupts are disabled. | 210 | * For use when we know that interrupts are disabled, |
211 | * or when we know that preemption is disabled and that | ||
212 | * particular counter cannot be updated from interrupt context. | ||
211 | */ | 213 | */ |
212 | void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, | 214 | void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, |
213 | int delta) | 215 | int delta) |