Diffstat (limited to 'mm')

-rw-r--r--	mm/memcontrol.c	63
-rw-r--r--	mm/slab.c	 3
-rw-r--r--	mm/slab.h	23
-rw-r--r--	mm/slub.c	 7

4 files changed, 95 insertions(+), 1 deletion(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cc13797d0fb..270a3678985 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2779,6 +2779,19 @@ static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
 		(memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
 }
 
+/*
+ * This is a bit cumbersome, but it is rarely used and avoids a backpointer
+ * in the memcg_cache_params struct.
+ */
+static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
+{
+	struct kmem_cache *cachep;
+
+	VM_BUG_ON(p->is_root_cache);
+	cachep = p->root_cache;
+	return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+}
+
 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 {
 	struct res_counter *fail_res;
@@ -3056,6 +3069,31 @@ static inline void memcg_resume_kmem_account(void)
 	current->memcg_kmem_skip_account--;
 }
 
+static void kmem_cache_destroy_work_func(struct work_struct *w)
+{
+	struct kmem_cache *cachep;
+	struct memcg_cache_params *p;
+
+	p = container_of(w, struct memcg_cache_params, destroy);
+
+	cachep = memcg_params_to_cache(p);
+
+	if (!atomic_read(&cachep->memcg_params->nr_pages))
+		kmem_cache_destroy(cachep);
+}
+
+void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
+{
+	if (!cachep->memcg_params->dead)
+		return;
+
+	/*
+	 * We have to defer the actual destroying to a workqueue, because
+	 * we might currently be in a context that cannot sleep.
+	 */
+	schedule_work(&cachep->memcg_params->destroy);
+}
+
 static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
 {
 	char *name;
@@ -3125,6 +3163,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
 	mem_cgroup_get(memcg);
 	new_cachep->memcg_params->root_cache = cachep;
+	atomic_set(&new_cachep->memcg_params->nr_pages , 0);
 
 	cachep->memcg_params->memcg_caches[idx] = new_cachep;
 	/*
@@ -3143,6 +3182,25 @@ struct create_work {
 	struct work_struct work;
 };
 
+static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
+{
+	struct kmem_cache *cachep;
+	struct memcg_cache_params *params;
+
+	if (!memcg_kmem_is_active(memcg))
+		return;
+
+	mutex_lock(&memcg->slab_caches_mutex);
+	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
+		cachep = memcg_params_to_cache(params);
+		cachep->memcg_params->dead = true;
+		INIT_WORK(&cachep->memcg_params->destroy,
+			  kmem_cache_destroy_work_func);
+		schedule_work(&cachep->memcg_params->destroy);
+	}
+	mutex_unlock(&memcg->slab_caches_mutex);
+}
+
 static void memcg_create_cache_work_func(struct work_struct *w)
 {
 	struct create_work *cw;
@@ -3358,6 +3416,10 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
 	VM_BUG_ON(mem_cgroup_is_root(memcg));
 	memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
 }
+#else
+static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
+{
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -5975,6 +6037,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
 	mem_cgroup_reparent_charges(memcg);
+	mem_cgroup_destroy_all_caches(memcg);
 }
 
 static void mem_cgroup_css_free(struct cgroup *cont)
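
The memcontrol.c side of the patch is a deferred-destruction pattern: when a cgroup goes offline, mem_cgroup_destroy_all_caches() marks each per-memcg cache dead, and the actual kmem_cache_destroy() is punted to a workqueue because the release path may run in a context that cannot sleep. Below is a minimal userspace model of that flow; names such as cache_release() and destroy_work() are illustrative stand-ins, not kernel APIs, and the workqueue is collapsed into a direct call.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for kmem_cache plus its memcg_cache_params. */
struct cache {
	const char *name;
	atomic_int nr_pages;	/* models memcg_params->nr_pages */
	bool dead;		/* models memcg_params->dead */
};

/* Models kmem_cache_destroy_work_func(): runs later, may sleep. */
static void destroy_work(struct cache *c)
{
	/* Re-check: pages may still be outstanding when the work runs. */
	if (atomic_load(&c->nr_pages) == 0)
		printf("destroying cache %s\n", c->name);
}

/* Models mem_cgroup_destroy_cache(): never sleeps, only queues work. */
static void cache_release(struct cache *c)
{
	if (!c->dead)
		return;
	destroy_work(c);	/* kernel: schedule_work(&params->destroy) */
}

int main(void)
{
	struct cache c = { .name = "dentry(memcg)", .nr_pages = 2 };

	c.dead = true;			/* the cgroup went offline */
	atomic_fetch_sub(&c.nr_pages, 1);
	cache_release(&c);		/* one page left: destroy skipped */
	atomic_fetch_sub(&c.nr_pages, 1);
	cache_release(&c);		/* last page freed: destroy runs */
	return 0;
}
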
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1895,6 +1895,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		if (page->pfmemalloc)
 			SetPageSlabPfmemalloc(page + i);
 	}
+	memcg_bind_pages(cachep, cachep->gfporder);
 
 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1931,6 +1932,8 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 		__ClearPageSlab(page);
 		page++;
 	}
+
+	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -117,6 +117,21 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
 		(cachep->memcg_params->memcg == memcg);
 }
 
+static inline void memcg_bind_pages(struct kmem_cache *s, int order)
+{
+	if (!is_root_cache(s))
+		atomic_add(1 << order, &s->memcg_params->nr_pages);
+}
+
+static inline void memcg_release_pages(struct kmem_cache *s, int order)
+{
+	if (is_root_cache(s))
+		return;
+
+	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
+		mem_cgroup_destroy_cache(s);
+}
+
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
@@ -135,6 +150,14 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
 	return true;
 }
 
+static inline void memcg_bind_pages(struct kmem_cache *s, int order)
+{
+}
+
+static inline void memcg_release_pages(struct kmem_cache *s, int order)
+{
+}
+
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
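
The slab.h helpers carry the whole accounting scheme: every allocated slab page adds 2^order to nr_pages, every freed slab page subtracts it, and the subtraction that reaches zero is what triggers destruction of a dead cache. A standalone sketch of the same sub-and-test idiom using C11 atomics follows; here atomic_fetch_sub() returning the old value plays the role of the kernel's atomic_sub_and_test(), and the function names are only illustrative.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_pages;	/* models memcg_params->nr_pages */

static void bind_pages(int order)	/* models memcg_bind_pages() */
{
	atomic_fetch_add(&nr_pages, 1 << order);
}

static void release_pages(int order)	/* models memcg_release_pages() */
{
	/*
	 * atomic_fetch_sub() returns the value before subtraction, so
	 * old == delta means the counter just hit zero -- the condition
	 * atomic_sub_and_test() reports in the kernel version.
	 */
	if (atomic_fetch_sub(&nr_pages, 1 << order) == (1 << order))
		printf("no slab pages left: destroy the dead cache\n");
}

int main(void)
{
	bind_pages(0);		/* order-0 slab: +1 page */
	bind_pages(1);		/* order-1 slab: +2 pages */
	release_pages(1);	/* 3 -> 1: cache stays */
	release_pages(0);	/* 1 -> 0: teardown fires */
	return 0;
}
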
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1344,6 +1344,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *start;
 	void *last;
 	void *p;
+	int order;
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
@@ -1352,7 +1353,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		goto out;
 
+	order = compound_order(page);
 	inc_slabs_node(s, page_to_nid(page), page->objects);
+	memcg_bind_pages(s, order);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
@@ -1361,7 +1364,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	start = page_address(page);
 
 	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
+		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
 	last = start;
 	for_each_object(p, s, start, page->objects) {
@@ -1402,6 +1405,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
+
+	memcg_release_pages(s, order);
 	reset_page_mapcount(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
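
One small pattern worth noting across the #else branch in mm/memcontrol.c and the stubs in mm/slab.h: when CONFIG_MEMCG_KMEM is compiled out, the hooks become empty static inlines, so call sites such as kmem_getpages() and new_slab() stay free of #ifdefs and the compiler drops the calls entirely. A generic illustration of the idiom; CONFIG_FEATURE and feature_hook() are made-up names, not anything from this patch.

#include <stdio.h>

#define CONFIG_FEATURE 0	/* flip to 1 to compile the hooks in */

#if CONFIG_FEATURE
static inline void feature_hook(int order)
{
	printf("accounting %d pages\n", 1 << order);
}
#else
/* Empty stub: the call below compiles to nothing. */
static inline void feature_hook(int order) { (void)order; }
#endif

int main(void)
{
	feature_hook(3);	/* unconditional call site, no #ifdef needed */
	return 0;
}
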