Diffstat (limited to 'include')
 include/linux/memcontrol.h | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 45085e14e023..bd9b5d73bc2b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -449,6 +449,10 @@ void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);
 
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
 void memcg_update_array_size(int num_groups);
+
+struct kmem_cache *
+__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
+
 /**
  * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
  * @gfp: the gfp allocation flags.
@@ -518,6 +522,37 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
 	__memcg_kmem_commit_charge(page, memcg, order);
 }
 
+/**
+ * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
+ * @cachep: the original global kmem cache
+ * @gfp: allocation flags.
+ *
+ * This function assumes that the task allocating, which determines the memcg
+ * in the page allocator, belongs to the same cgroup throughout the whole
+ * process. Misaccounting can happen if the task calls memcg_kmem_get_cache()
+ * while belonging to a cgroup, and later on changes. This is considered
+ * acceptable, and should only happen upon task migration.
+ *
+ * Before the cache is created by the memcg core, there is also a possible
+ * imbalance: the task belongs to a memcg, but the cache being allocated from
+ * is the global cache, since the child cache is not yet guaranteed to be
+ * ready. This case is also fine, since in this case the GFP_KMEMCG flag will
+ * not be passed and the page allocator will not attempt any cgroup accounting.
+ */
+static __always_inline struct kmem_cache *
+memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+{
+	if (!memcg_kmem_enabled())
+		return cachep;
+	if (gfp & __GFP_NOFAIL)
+		return cachep;
+	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
+		return cachep;
+	if (unlikely(fatal_signal_pending(current)))
+		return cachep;
+
+	return __memcg_kmem_get_cache(cachep, gfp);
+}
 #else
 static inline bool
 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
@@ -553,6 +588,12 @@ static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
 					struct kmem_cache *s)
 {
 }
+
+static inline struct kmem_cache *
+memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+{
+	return cachep;
+}
 #endif /* CONFIG_MEMCG_KMEM */
 #endif /* _LINUX_MEMCONTROL_H */
 
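For context, a minimal caller sketch follows. It is not part of this patch: the helper name my_cache_alloc() is invented for illustration, and the real wiring into the slab allocators happens elsewhere in the series. It only shows how an allocation path could route a request through memcg_kmem_get_cache() before allocating.

/*
 * Hypothetical sketch, not from this patch: substitute the current
 * task's per-memcg cache for the global one before allocating.
 * Every bail-out inside memcg_kmem_get_cache() (accounting disabled,
 * __GFP_NOFAIL, interrupt context, kernel threads, pending fatal
 * signals) returns the original cache, so this path degrades
 * gracefully to an unaccounted allocation.
 */
static void *my_cache_alloc(struct kmem_cache *cachep, gfp_t gfp)
{
	cachep = memcg_kmem_get_cache(cachep, gfp);
	return kmem_cache_alloc(cachep, gfp);
}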