author    Johannes Weiner <hannes@cmpxchg.org>  2018-01-31 19:16:41 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 20:18:36 -0500
commit    284542656e22c43fdada8c8cc0ca9ede8453eed7 (patch)
tree      5f815001470c5f76ede887f9da9272772e9219ca
parent    c9019e9bf42e66d028d70d2da6206cad4dd9250d (diff)
mm: memcontrol: implement lruvec stat functions on top of each other
The implementations of the lruvec stat functions and of their variants for
accounting through a page, or for accounting from a preemptible context, are
mostly identical and needlessly repetitive. Implement the lruvec_page
functions by looking up the page's lruvec and then using the lruvec
function. Implement the functions for preemptible contexts by disabling
preemption before calling the atomic context functions.

Link: http://lkml.kernel.org/r/20171103153336.24044-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/memcontrol.h | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
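For readers who want the shape of the change without the kernel context, the
block below is a minimal standalone C sketch of the second technique the
message describes: a preemptible-context wrapper implemented on top of the
atomic-context function. It is an illustration, not kernel code;
preempt_disable()/preempt_enable() are defined here as no-op stand-ins for
the kernel primitives, and __mod_counter()/mod_counter() are hypothetical
names playing the roles of __mod_lruvec_state()/mod_lruvec_state(). The
first technique, looking up the page's lruvec and delegating, can be read
directly off the diff below.

/*
 * Standalone sketch (userspace C, not kernel code) of the layering
 * pattern in this patch: the __-prefixed function does the real work
 * and assumes preemption is already disabled; the plain-named wrapper
 * only brackets it with preempt_disable()/preempt_enable().
 */
#include <stdio.h>

/* No-op stand-ins: the real primitives exist only in the kernel. */
static void preempt_disable(void) { }
static void preempt_enable(void) { }

static long counter;	/* stands in for a per-cpu stat counter */

/* Atomic-context variant: caller must have preemption disabled. */
static void __mod_counter(int val)
{
	counter += val;
}

/* Preemptible-context variant, implemented on top of the one above. */
static void mod_counter(int val)
{
	preempt_disable();
	__mod_counter(val);
	preempt_enable();
}

int main(void)
{
	mod_counter(3);
	mod_counter(-1);
	printf("counter = %ld\n", counter);	/* prints "counter = 2" */
	return 0;
}

The point of the layering is that any later fix or extension to the
__-variant is picked up by the wrapper for free, a guarantee the duplicated
function bodies deleted in the diff below could not provide.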
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2c80b69dd266..1ffc54ac4cc9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -569,51 +569,51 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
 {
 	struct mem_cgroup_per_node *pn;
 
+	/* Update node */
 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+
 	if (mem_cgroup_disabled())
 		return;
+
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+
+	/* Update memcg */
 	__mod_memcg_state(pn->memcg, idx, val);
+
+	/* Update lruvec */
 	__this_cpu_add(pn->lruvec_stat->count[idx], val);
 }
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
-
-	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-	if (mem_cgroup_disabled())
-		return;
-	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	mod_memcg_state(pn->memcg, idx, val);
-	this_cpu_add(pn->lruvec_stat->count[idx], val);
+	preempt_disable();
+	__mod_lruvec_state(lruvec, idx, val);
+	preempt_enable();
 }
 
 static inline void __mod_lruvec_page_state(struct page *page,
 					   enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
+	pg_data_t *pgdat = page_pgdat(page);
+	struct lruvec *lruvec;
 
-	__mod_node_page_state(page_pgdat(page), idx, val);
-	if (mem_cgroup_disabled() || !page->mem_cgroup)
+	/* Untracked pages have no memcg, no lruvec. Update only the node */
+	if (!page->mem_cgroup) {
+		__mod_node_page_state(pgdat, idx, val);
 		return;
-	__mod_memcg_state(page->mem_cgroup, idx, val);
-	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-	__this_cpu_add(pn->lruvec_stat->count[idx], val);
+	}
+
+	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+	__mod_lruvec_state(lruvec, idx, val);
 }
 
 static inline void mod_lruvec_page_state(struct page *page,
 					 enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
-
-	mod_node_page_state(page_pgdat(page), idx, val);
-	if (mem_cgroup_disabled() || !page->mem_cgroup)
-		return;
-	mod_memcg_state(page->mem_cgroup, idx, val);
-	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-	this_cpu_add(pn->lruvec_stat->count[idx], val);
+	preempt_disable();
+	__mod_lruvec_page_state(page, idx, val);
+	preempt_enable();
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,