Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--  include/linux/memcontrol.h  92
1 file changed, 77 insertions(+), 15 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7c95af8d552c..72dff5fb0d0c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -52,7 +52,27 @@ struct mem_cgroup_reclaim_cookie {
 	unsigned int generation;
 };
 
+enum mem_cgroup_events_index {
+	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
+	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
+	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
+	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
+	MEM_CGROUP_EVENTS_NSTATS,
+	/* default hierarchy events */
+	MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
+	MEMCG_HIGH,
+	MEMCG_MAX,
+	MEMCG_OOM,
+	MEMCG_NR_EVENTS,
+};
+
 #ifdef CONFIG_MEMCG
+void mem_cgroup_events(struct mem_cgroup *memcg,
+		       enum mem_cgroup_events_index idx,
+		       unsigned int nr);
+
+bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+
 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
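This hunk folds the legacy per-memcg event counters and the new default-hierarchy notification events (MEMCG_LOW/HIGH/MAX/OOM) into one enum, and adds two entry points. A minimal caller sketch follows; both wrapper functions are hypothetical, only mem_cgroup_events(), mem_cgroup_low() and the MEMCG_* indexes come from this header:

/* Hypothetical wrappers illustrating the new API. */
static void note_max_breach(struct mem_cgroup *memcg)
{
	/* count one max-limit breach for event notification */
	mem_cgroup_events(memcg, MEMCG_MAX, 1);
}

static bool may_reclaim_from(struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	/*
	 * mem_cgroup_low() returns true while usage is below the
	 * cgroup's low boundary; reclaim should spare such groups as
	 * long as others can still give up memory.
	 */
	return !mem_cgroup_low(root, memcg);
}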
@@ -102,6 +122,7 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
  * For memory reclaim.
  */
 int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
+bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
 void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
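mem_cgroup_lruvec_online() lets reclaim ask whether the cgroup owning an lruvec is still online. A hedged sketch of a plausible use; the helper below is hypothetical:

/*
 * Illustrative fragment: an lruvec whose owning cgroup has gone
 * offline can be treated specially -- e.g. scanned more aggressively
 * so its leftover pages are freed or reparented quickly.
 */
static bool lruvec_needs_force_scan(struct lruvec *lruvec)
{
	return !mem_cgroup_lruvec_online(lruvec);
}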
@@ -138,12 +159,10 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
-					      unsigned long *flags);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
-			      unsigned long *flags);
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
 void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
 				 enum mem_cgroup_stat_index idx, int val);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
 
 static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
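The begin/end page-stat pair loses its bool *locked / unsigned long *flags out-parameters: any locking state is now kept inside memcontrol.c rather than threaded through every caller. A sketch of the resulting caller pattern; the rmap-style context and function name are illustrative:

static void account_file_mapped(struct page *page)
{
	struct mem_cgroup *memcg;

	/* pins the page's memcg (may return NULL) for the update below */
	memcg = mem_cgroup_begin_page_stat(page);
	mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
	mem_cgroup_end_page_stat(memcg);
}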
@@ -176,6 +195,18 @@ void mem_cgroup_split_huge_fixup(struct page *head);
 #else /* CONFIG_MEMCG */
 struct mem_cgroup;
 
+static inline void mem_cgroup_events(struct mem_cgroup *memcg,
+				     enum mem_cgroup_events_index idx,
+				     unsigned int nr)
+{
+}
+
+static inline bool mem_cgroup_low(struct mem_cgroup *root,
+				  struct mem_cgroup *memcg)
+{
+	return false;
+}
+
 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask,
 					struct mem_cgroup **memcgp)
@@ -268,6 +299,11 @@ mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 	return 1;
 }
 
+static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+{
+	return true;
+}
+
 static inline unsigned long
 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
@@ -285,14 +321,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
 {
 	return NULL;
 }
 
-static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
-					    bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
 {
 }
 
@@ -364,7 +398,9 @@ static inline void sock_release_memcg(struct sock *sk)
 #ifdef CONFIG_MEMCG_KMEM
 extern struct static_key memcg_kmem_enabled_key;
 
-extern int memcg_limited_groups_array_size;
+extern int memcg_nr_cache_ids;
+extern void memcg_get_cache_ids(void);
+extern void memcg_put_cache_ids(void);
 
 /*
  * Helper macro to loop through all memcg-specific caches. Callers must still
@@ -372,13 +408,15 @@ extern int memcg_limited_groups_array_size;
  * the slab_mutex must be held when looping through those caches
  */
 #define for_each_memcg_cache_index(_idx)	\
-	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
+	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
 
 static inline bool memcg_kmem_enabled(void)
 {
 	return static_key_false(&memcg_kmem_enabled_key);
 }
 
+bool memcg_kmem_is_active(struct mem_cgroup *memcg);
+
 /*
  * In general, we'll do everything in our power to not incur in any overhead
  * for non-memcg users for the kmem functions. Not even a function call, if we
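memcg_limited_groups_array_size becomes memcg_nr_cache_ids, and the new memcg_get_cache_ids()/memcg_put_cache_ids() pair pins that value so the id range cannot grow in the middle of a walk. A hedged sketch of the intended pattern; the function is hypothetical, and the locking noted above the macro (e.g. slab_mutex) is still needed to dereference the caches themselves:

static void visit_memcg_cache_ids(void)
{
	int i;

	memcg_get_cache_ids();	/* memcg_nr_cache_ids is stable from here */
	for_each_memcg_cache_index(i) {
		/* look up per-memcg cache number i of a root cache here */
	}
	memcg_put_cache_ids();
}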
@@ -398,15 +436,14 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
 
 int memcg_cache_id(struct mem_cgroup *memcg);
 
-void memcg_update_array_size(int num_groups);
-
 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 
-int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
-void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
+struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
 
-int __memcg_cleanup_cache_params(struct kmem_cache *s);
+int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
+		      unsigned long nr_pages);
+void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
 
 /**
  * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
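The slab-specific __memcg_charge_slab()/__memcg_uncharge_slab() pair gives way to a cgroup-level pair that accounts in pages. A hedged sketch of how a caller might bracket an allocation with it; the function, its error handling, and the elided allocation step are illustrative only:

static int charge_kmem_pages(struct mem_cgroup *memcg, gfp_t gfp, int order)
{
	int ret;

	/* charge 2^order pages against the cgroup's kmem limit */
	ret = memcg_charge_kmem(memcg, gfp, 1UL << order);
	if (ret)
		return ret;
	/* ... allocate the pages; on failure or later free, undo: ... */
	/* memcg_uncharge_kmem(memcg, 1UL << order); */
	return 0;
}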
@@ -500,6 +537,13 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
 	if (memcg_kmem_enabled())
 		__memcg_kmem_put_cache(cachep);
 }
+
+static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
+{
+	if (!memcg_kmem_enabled())
+		return NULL;
+	return __mem_cgroup_from_kmem(ptr);
+}
 #else
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
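mem_cgroup_from_kmem() resolves which memcg a kmem-allocated object is charged to, e.g. to pick a matching per-memcg structure for it; the memcg_kmem_enabled() static key keeps it a near no-op until kmem accounting is actually switched on. A hedged usage sketch with a hypothetical helper:

static bool object_is_memcg_charged(void *ptr)
{
	/* NULL means root cgroup or kmem accounting disabled */
	return mem_cgroup_from_kmem(ptr) != NULL;
}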
@@ -509,6 +553,11 @@ static inline bool memcg_kmem_enabled(void)
 	return false;
 }
 
+static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+	return false;
+}
+
 static inline bool
 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
 {
@@ -529,6 +578,14 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return -1;
 }
 
+static inline void memcg_get_cache_ids(void)
+{
+}
+
+static inline void memcg_put_cache_ids(void)
+{
+}
+
 static inline struct kmem_cache *
 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
@@ -538,6 +595,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
 }
+
+static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
+{
+	return NULL;
+}
 #endif /* CONFIG_MEMCG_KMEM */
 #endif /* _LINUX_MEMCONTROL_H */
 
