aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/memcontrol.h
diff options
context:
space:
mode:
authorGlauber Costa <glommer@parallels.com>2012-12-18 17:21:56 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-12-18 18:02:12 -0500
commit7ae1e1d0f8ac2927ed7e3ca6d15e42d485903459 (patch)
tree6b95f008400510bee9a7742ee21bf5316a59f851 /include/linux/memcontrol.h
parent7a64bf05b2a6fe3703062d13d389e3eb904741c6 (diff)
memcg: kmem controller infrastructure
Introduce infrastructure for tracking kernel memory pages to a given memcg. This will happen whenever the caller includes the __GFP_KMEMCG flag, and the task belongs to a memcg other than the root. In memcontrol.h those functions are wrapped in inline accessors. The idea is to later on patch those with static branches, so we don't incur any overhead when no mem cgroups with limited kmem are being used. Users of this functionality shall interact with the memcg core code through the following functions: memcg_kmem_newpage_charge: will return true if the group can handle the allocation. At this point, struct page is not yet allocated. memcg_kmem_commit_charge: will either revert the charge, if struct page allocation failed, or embed memcg information into page_cgroup. memcg_kmem_uncharge_page: called at free time, will revert the charge. Signed-off-by: Glauber Costa <glommer@parallels.com> Acked-by: Michal Hocko <mhocko@suse.cz> Acked-by: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@cs.helsinki.fi> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Tejun Heo <tj@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Frederic Weisbecker <fweisbec@redhat.com> Cc: Greg Thelen <gthelen@google.com> Cc: JoonSoo Kim <js1304@gmail.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Rik van Riel <riel@redhat.com> Cc: Suleiman Souhlal <suleiman@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--include/linux/memcontrol.h110
1 files changed, 110 insertions, 0 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e98a74c0c9c0..afa2ad40457e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,6 +21,7 @@
21#define _LINUX_MEMCONTROL_H 21#define _LINUX_MEMCONTROL_H
22#include <linux/cgroup.h> 22#include <linux/cgroup.h>
23#include <linux/vm_event_item.h> 23#include <linux/vm_event_item.h>
24#include <linux/hardirq.h>
24 25
25struct mem_cgroup; 26struct mem_cgroup;
26struct page_cgroup; 27struct page_cgroup;
@@ -414,5 +415,114 @@ static inline void sock_release_memcg(struct sock *sk)
414{ 415{
415} 416}
416#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */ 417#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
418
419#ifdef CONFIG_MEMCG_KMEM
420static inline bool memcg_kmem_enabled(void)
421{
422 return true;
423}
424
425/*
426 * In general, we'll do everything in our power to not incur in any overhead
427 * for non-memcg users for the kmem functions. Not even a function call, if we
428 * can avoid it.
429 *
430 * Therefore, we'll inline all those functions so that in the best case, we'll
431 * see that kmemcg is off for everybody and proceed quickly. If it is on,
432 * we'll still do most of the flag checking inline. We check a lot of
433 * conditions, but because they are pretty simple, they are expected to be
434 * fast.
435 */
436bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
437 int order);
438void __memcg_kmem_commit_charge(struct page *page,
439 struct mem_cgroup *memcg, int order);
440void __memcg_kmem_uncharge_pages(struct page *page, int order);
441
442/**
443 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
444 * @gfp: the gfp allocation flags.
445 * @memcg: a pointer to the memcg this was charged against.
446 * @order: allocation order.
447 *
448 * returns true if the memcg where the current task belongs can hold this
449 * allocation.
450 *
451 * We return true automatically if this allocation is not to be accounted to
452 * any memcg.
453 */
454static inline bool
455memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
456{
457 if (!memcg_kmem_enabled())
458 return true;
459
460 /*
461 * __GFP_NOFAIL allocations will move on even if charging is not
462 * possible. Therefore we don't even try, and have this allocation
463 * unaccounted. We could in theory charge it with
464 * res_counter_charge_nofail, but we hope those allocations are rare,
465 * and won't be worth the trouble.
466 */
467 if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
468 return true;
469 if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
470 return true;
471
472 /* If the test is dying, just let it go. */
473 if (unlikely(fatal_signal_pending(current)))
474 return true;
475
476 return __memcg_kmem_newpage_charge(gfp, memcg, order);
477}
478
479/**
480 * memcg_kmem_uncharge_pages: uncharge pages from memcg
481 * @page: pointer to struct page being freed
482 * @order: allocation order.
483 *
484 * there is no need to specify memcg here, since it is embedded in page_cgroup
485 */
486static inline void
487memcg_kmem_uncharge_pages(struct page *page, int order)
488{
489 if (memcg_kmem_enabled())
490 __memcg_kmem_uncharge_pages(page, order);
491}
492
493/**
494 * memcg_kmem_commit_charge: embeds correct memcg in a page
495 * @page: pointer to struct page recently allocated
496 * @memcg: the memcg structure we charged against
497 * @order: allocation order.
498 *
499 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
500 * failure of the allocation. if @page is NULL, this function will revert the
501 * charges. Otherwise, it will commit the memcg given by @memcg to the
502 * corresponding page_cgroup.
503 */
504static inline void
505memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
506{
507 if (memcg_kmem_enabled() && memcg)
508 __memcg_kmem_commit_charge(page, memcg, order);
509}
510
511#else
512static inline bool
513memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
514{
515 return true;
516}
517
518static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
519{
520}
521
522static inline void
523memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
524{
525}
526#endif /* CONFIG_MEMCG_KMEM */
417#endif /* _LINUX_MEMCONTROL_H */ 527#endif /* _LINUX_MEMCONTROL_H */
418 528