diff options
author | Hugh Dickins <hughd@google.com> | 2016-05-19 20:12:38 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-19 22:12:14 -0400 |
commit | 9d5e6a9f22311b00a20ff9b072760ad3e73f0d99 (patch) | |
tree | a9b5b5e2607c7ecbba341e1d06e4220eae032031 /include/linux/mm_inline.h | |
parent | ca707239e8a7958ffb1c31737d41cae1a674c938 (diff) |
mm: update_lru_size do the __mod_zone_page_state
Konstantin Khlebnikov pointed out (nearly four years ago, when lumpy
reclaim was removed) that lru_size can be updated by -nr_taken once per
call to isolate_lru_pages(), instead of page by page.
Update it inside isolate_lru_pages(), or at its two callsites? I chose
to update it at the callsites, rearranging and grouping the updates by
nr_taken and nr_scanned together in both.
With one exception, mem_cgroup_update_lru_size(,lru,) is then used where
__mod_zone_page_state(,NR_LRU_BASE+lru,) is used; and we shall be adding
some more calls in a future commit. Make the code a little smaller and
simpler by incorporating stat update in lru_size update.
The exception was move_active_pages_to_lru(), which aggregated the
pgmoved stat update separately from the individual lru_size updates; but
I still think this is a simplification worth making.
However, the __mod_zone_page_state is not peculiar to mem_cgroups: so it
is better to use the name update_lru_size, which calls
mem_cgroup_update_lru_size when CONFIG_MEMCG is enabled.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Yang Shi <yang.shi@linaro.org>
Cc: Ning Qu <quning@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mm_inline.h')
-rw-r--r-- | include/linux/mm_inline.h | 24 |
1 file changed, 18 insertions, 6 deletions
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index d8cea81ab1ac..5bd29ba4f174 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h | |||
@@ -22,22 +22,34 @@ static inline int page_is_file_cache(struct page *page) | |||
22 | return !PageSwapBacked(page); | 22 | return !PageSwapBacked(page); |
23 | } | 23 | } |
24 | 24 | ||
25 | static __always_inline void __update_lru_size(struct lruvec *lruvec, | ||
26 | enum lru_list lru, int nr_pages) | ||
27 | { | ||
28 | __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages); | ||
29 | } | ||
30 | |||
31 | static __always_inline void update_lru_size(struct lruvec *lruvec, | ||
32 | enum lru_list lru, int nr_pages) | ||
33 | { | ||
34 | #ifdef CONFIG_MEMCG | ||
35 | mem_cgroup_update_lru_size(lruvec, lru, nr_pages); | ||
36 | #else | ||
37 | __update_lru_size(lruvec, lru, nr_pages); | ||
38 | #endif | ||
39 | } | ||
40 | |||
25 | static __always_inline void add_page_to_lru_list(struct page *page, | 41 | static __always_inline void add_page_to_lru_list(struct page *page, |
26 | struct lruvec *lruvec, enum lru_list lru) | 42 | struct lruvec *lruvec, enum lru_list lru) |
27 | { | 43 | { |
28 | int nr_pages = hpage_nr_pages(page); | 44 | update_lru_size(lruvec, lru, hpage_nr_pages(page)); |
29 | mem_cgroup_update_lru_size(lruvec, lru, nr_pages); | ||
30 | list_add(&page->lru, &lruvec->lists[lru]); | 45 | list_add(&page->lru, &lruvec->lists[lru]); |
31 | __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages); | ||
32 | } | 46 | } |
33 | 47 | ||
34 | static __always_inline void del_page_from_lru_list(struct page *page, | 48 | static __always_inline void del_page_from_lru_list(struct page *page, |
35 | struct lruvec *lruvec, enum lru_list lru) | 49 | struct lruvec *lruvec, enum lru_list lru) |
36 | { | 50 | { |
37 | int nr_pages = hpage_nr_pages(page); | ||
38 | list_del(&page->lru); | 51 | list_del(&page->lru); |
39 | mem_cgroup_update_lru_size(lruvec, lru, -nr_pages); | 52 | update_lru_size(lruvec, lru, -hpage_nr_pages(page)); |
40 | __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages); | ||
41 | } | 53 | } |
42 | 54 | ||
43 | /** | 55 | /** |