-rw-r--r--  include/linux/sched.h   7
-rw-r--r--  mm/memcontrol.c        35
2 files changed, 7 insertions(+), 35 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 55f5ee7cc3d3..4cfdbcf8cf56 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1364,6 +1364,10 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
+#ifdef CONFIG_MEMCG_KMEM
+	unsigned memcg_kmem_skip_account:1;
+#endif
+
 	unsigned long atomic_flags; /* Flags needing atomic access. */
 
 	pid_t pid;
@@ -1679,8 +1683,7 @@ struct task_struct {
 	/* bitmask and counter of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
-#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
-	unsigned int memcg_kmem_skip_account;
+#ifdef CONFIG_MEMCG
 	struct memcg_oom_info {
 		struct mem_cgroup *memcg;
 		gfp_t gfp_mask;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d9fab72da52e..11cbfde4dc6d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2673,37 +2673,6 @@ static void memcg_unregister_cache(struct kmem_cache *cachep)
 	css_put(&memcg->css);
 }
 
-/*
- * During the creation a new cache, we need to disable our accounting mechanism
- * altogether. This is true even if we are not creating, but rather just
- * enqueing new caches to be created.
- *
- * This is because that process will trigger allocations; some visible, like
- * explicit kmallocs to auxiliary data structures, name strings and internal
- * cache structures; some well concealed, like INIT_WORK() that can allocate
- * objects during debug.
- *
- * If any allocation happens during memcg_kmem_get_cache, we will recurse back
- * to it. This may not be a bounded recursion: since the first cache creation
- * failed to complete (waiting on the allocation), we'll just try to create the
- * cache again, failing at the same point.
- *
- * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
- * memcg_kmem_skip_account. So we enclose anything that might allocate memory
- * inside the following two functions.
- */
-static inline void memcg_stop_kmem_account(void)
-{
-	VM_BUG_ON(!current->mm);
-	current->memcg_kmem_skip_account++;
-}
-
-static inline void memcg_resume_kmem_account(void)
-{
-	VM_BUG_ON(!current->mm);
-	current->memcg_kmem_skip_account--;
-}
-
 int __memcg_cleanup_cache_params(struct kmem_cache *s)
 {
 	struct kmem_cache *c;
@@ -2798,9 +2767,9 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
 	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
 	 * the safest choice is to do it like this, wrapping the whole function.
 	 */
-	memcg_stop_kmem_account();
+	current->memcg_kmem_skip_account = 1;
 	__memcg_schedule_register_cache(memcg, cachep);
-	memcg_resume_kmem_account();
+	current->memcg_kmem_skip_account = 0;
 }
 
 int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
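
For context on why a single bit is enough here: the flag only matters because the kmem allocation path checks it before trying to pick a per-memcg cache, and the section it protects (scheduling a cache registration) does not nest, so plain assignment of 1 and 0 can replace the old increment/decrement counter. Below is a minimal sketch of that consumer side, assuming the check remains a simple truth test of current->memcg_kmem_skip_account as in the memcg_kmem_get_cache() path; the helper name pick_memcg_cache() and its surrounding structure are illustrative only, not the kernel's actual function.

/*
 * Sketch of the consumer side of the flag (illustrative, not the real
 * __memcg_kmem_get_cache()). Kernel threads have no mm, and a task that
 * is currently scheduling a per-memcg cache registration has the skip
 * bit set: both cases fall back to the root cache, so allocations made
 * while creating a cache cannot recurse back into cache creation.
 */
static struct kmem_cache *pick_memcg_cache(struct kmem_cache *cachep)
{
	if (!current->mm || current->memcg_kmem_skip_account)
		return cachep;		/* root cache, no accounting */

	/* ... otherwise look up or schedule the per-memcg copy ... */
	return cachep;
}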