diff options
-rw-r--r-- | include/linux/memcontrol.h | 6 | ||||
-rw-r--r-- | mm/slab.c | 1 | ||||
-rw-r--r-- | mm/slub.c | 21 |
3 files changed, 24 insertions, 4 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ea02ff970836..0108a56f814e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -422,6 +422,12 @@ static inline void sock_release_memcg(struct sock *sk) | |||
422 | extern struct static_key memcg_kmem_enabled_key; | 422 | extern struct static_key memcg_kmem_enabled_key; |
423 | 423 | ||
424 | extern int memcg_limited_groups_array_size; | 424 | extern int memcg_limited_groups_array_size; |
425 | |||
426 | /* | ||
427 | * Helper macro to loop through all memcg-specific caches. Callers must still | ||
428 | * check if the cache is valid (it is either valid or NULL). | ||
429 | * The slab_mutex must be held when looping through those caches. | ||
430 | */ | ||
425 | #define for_each_memcg_cache_index(_idx) \ | 431 | #define for_each_memcg_cache_index(_idx) \ |
426 | for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++) | 432 | for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
427 | 433 | ||
@@ -4099,6 +4099,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, | |||
4099 | if ((ret < 0) || !is_root_cache(cachep)) | 4099 | if ((ret < 0) || !is_root_cache(cachep)) |
4100 | return ret; | 4100 | return ret; |
4101 | 4101 | ||
4102 | VM_BUG_ON(!mutex_is_locked(&slab_mutex)); | ||
4102 | for_each_memcg_cache_index(i) { | 4103 | for_each_memcg_cache_index(i) { |
4103 | c = cache_from_memcg(cachep, i); | 4104 | c = cache_from_memcg(cachep, i); |
4104 | if (c) | 4105 | if (c) |
@@ -5108,12 +5108,25 @@ static ssize_t slab_attr_store(struct kobject *kobj, | |||
5108 | if (s->max_attr_size < len) | 5108 | if (s->max_attr_size < len) |
5109 | s->max_attr_size = len; | 5109 | s->max_attr_size = len; |
5110 | 5110 | ||
5111 | /* | ||
5112 | * This is a best effort propagation, so this function's return | ||
5113 | * value will be determined by the parent cache only. This is | ||
5114 | * basically because not all attributes will have a well | ||
5115 | * defined semantics for rollbacks - most of the actions will | ||
5116 | * have permanent effects. | ||
5117 | * | ||
5118 | * Returning the error value of any of the children that fail | ||
5119 | * is not 100% defined, in the sense that users seeing the | ||
5120 | * error code won't be able to know anything about the state of | ||
5121 | * the cache. | ||
5122 | * | ||
5123 | * Only returning the error code for the parent cache at least | ||
5124 | * has well defined semantics. The cache being written to | ||
5125 | * directly either failed or succeeded, in which case we loop | ||
5126 | * through the descendants with best-effort propagation. | ||
5127 | */ | ||
5111 | for_each_memcg_cache_index(i) { | 5128 | for_each_memcg_cache_index(i) { |
5112 | struct kmem_cache *c = cache_from_memcg(s, i); | 5129 | struct kmem_cache *c = cache_from_memcg(s, i); |
5113 | /* | ||
5114 | * This function's return value is determined by the | ||
5115 | * parent cache only | ||
5116 | */ | ||
5117 | if (c) | 5130 | if (c) |
5118 | attribute->store(c, buf, len); | 5131 | attribute->store(c, buf, len); |
5119 | } | 5132 | } |