Diffstat (limited to 'include/linux/memcontrol.h')
 -rw-r--r--  include/linux/memcontrol.h | 218
 1 file changed, 217 insertions, 1 deletion
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 11ddc7ffeba8..0108a56f814e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,11 +21,14 @@
 #define _LINUX_MEMCONTROL_H
 #include <linux/cgroup.h>
 #include <linux/vm_event_item.h>
+#include <linux/hardirq.h>
+#include <linux/jump_label.h>
 
 struct mem_cgroup;
 struct page_cgroup;
 struct page;
 struct mm_struct;
+struct kmem_cache;
 
 /* Stats that can be updated by kernel. */
 enum mem_cgroup_page_stat_item {
@@ -181,7 +184,14 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                                gfp_t gfp_mask,
                                                unsigned long *total_scanned);
 
-void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
+void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
+static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
+                                             enum vm_event_item idx)
+{
+        if (mem_cgroup_disabled())
+                return;
+        __mem_cgroup_count_vm_event(mm, idx);
+}
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void mem_cgroup_split_huge_fixup(struct page *head);
 #endif
@@ -407,5 +417,211 @@ static inline void sock_release_memcg(struct sock *sk)
 {
 }
 #endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
+
+#ifdef CONFIG_MEMCG_KMEM
+extern struct static_key memcg_kmem_enabled_key;
+
+extern int memcg_limited_groups_array_size;
+
+/*
+ * Helper macro to loop through all memcg-specific caches. Callers must still
+ * check if the cache is valid (it is either valid or NULL).
+ * The slab_mutex must be held when looping through those caches.
+ */
+#define for_each_memcg_cache_index(_idx) \
+        for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
+
+static inline bool memcg_kmem_enabled(void)
+{
+        return static_key_false(&memcg_kmem_enabled_key);
+}
+
+/*
+ * In general, we'll do everything in our power to not incur any overhead
+ * for non-memcg users of the kmem functions. Not even a function call, if we
+ * can avoid it.
+ *
+ * Therefore, we'll inline all those functions so that in the best case, we'll
+ * see that kmemcg is off for everybody and proceed quickly. If it is on,
+ * we'll still do most of the flag checking inline. We check a lot of
+ * conditions, but because they are pretty simple, they are expected to be
+ * fast.
+ */
+bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
+                                 int order);
+void __memcg_kmem_commit_charge(struct page *page,
+                                struct mem_cgroup *memcg, int order);
+void __memcg_kmem_uncharge_pages(struct page *page, int order);
+
+int memcg_cache_id(struct mem_cgroup *memcg);
+int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
+                         struct kmem_cache *root_cache);
+void memcg_release_cache(struct kmem_cache *cachep);
+void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);
+
+int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
+void memcg_update_array_size(int num_groups);
+
+struct kmem_cache *
+__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
+
+void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
+void kmem_cache_destroy_memcg_children(struct kmem_cache *s);
+
+/**
+ * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
+ * @gfp: the gfp allocation flags.
+ * @memcg: a pointer to the memcg this was charged against.
+ * @order: allocation order.
+ *
+ * Returns true if the memcg that the current task belongs to can accept this
+ * allocation.
+ *
+ * We return true automatically if this allocation is not to be accounted to
+ * any memcg.
+ */
+static inline bool
+memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+{
+        if (!memcg_kmem_enabled())
+                return true;
+
+        /*
+         * __GFP_NOFAIL allocations will move on even if charging is not
+         * possible. Therefore we don't even try, and leave this allocation
+         * unaccounted. We could in theory charge it with
+         * res_counter_charge_nofail, but we hope those allocations are rare,
+         * and not worth the trouble.
+         */
+        if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
+                return true;
+        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
+                return true;
+
+        /* If the task is dying, just let it go. */
+        if (unlikely(fatal_signal_pending(current)))
+                return true;
+
+        return __memcg_kmem_newpage_charge(gfp, memcg, order);
+}
+
+/**
+ * memcg_kmem_uncharge_pages: uncharge pages from memcg
+ * @page: pointer to struct page being freed
+ * @order: allocation order.
+ *
+ * There is no need to specify the memcg here, since it is embedded in the
+ * page_cgroup.
+ */
+static inline void
+memcg_kmem_uncharge_pages(struct page *page, int order)
+{
+        if (memcg_kmem_enabled())
+                __memcg_kmem_uncharge_pages(page, order);
+}
+
+/**
+ * memcg_kmem_commit_charge: embeds correct memcg in a page
+ * @page: pointer to struct page recently allocated
+ * @memcg: the memcg structure we charged against
+ * @order: allocation order.
+ *
+ * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
+ * failure of the allocation. If @page is NULL, this function will revert the
+ * charges. Otherwise, it will commit the memcg given by @memcg to the
+ * corresponding page_cgroup.
+ */
+static inline void
+memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+{
+        if (memcg_kmem_enabled() && memcg)
+                __memcg_kmem_commit_charge(page, memcg, order);
+}
+
+/**
+ * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
+ * @cachep: the original global kmem cache
+ * @gfp: allocation flags.
+ *
+ * This function assumes that the task allocating, which determines the memcg
+ * in the page allocator, belongs to the same cgroup throughout the whole
+ * process. Misaccounting can happen if the task calls memcg_kmem_get_cache()
+ * while belonging to a cgroup, and later changes cgroups. This is considered
+ * acceptable, and should only happen upon task migration.
+ *
+ * Before the cache is created by the memcg core, there is also a possible
+ * imbalance: the task belongs to a memcg, but the cache being allocated from
+ * is the global cache, since the child cache is not yet guaranteed to be
+ * ready. This case is also fine, since in that case __GFP_KMEMCG will not be
+ * passed and the page allocator will not attempt any cgroup accounting.
+ */
+static __always_inline struct kmem_cache *
+memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+{
+        if (!memcg_kmem_enabled())
+                return cachep;
+        if (gfp & __GFP_NOFAIL)
+                return cachep;
+        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
+                return cachep;
+        if (unlikely(fatal_signal_pending(current)))
+                return cachep;
+
+        return __memcg_kmem_get_cache(cachep, gfp);
+}
+#else
+#define for_each_memcg_cache_index(_idx) \
+        for (; NULL; )
+
+static inline bool memcg_kmem_enabled(void)
+{
+        return false;
+}
+
+static inline bool
+memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+{
+        return true;
+}
+
+static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
+{
+}
+
+static inline void
+memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+{
+}
+
+static inline int memcg_cache_id(struct mem_cgroup *memcg)
+{
+        return -1;
+}
+
+static inline int
+memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
+                     struct kmem_cache *root_cache)
+{
+        return 0;
+}
+
+static inline void memcg_release_cache(struct kmem_cache *cachep)
+{
+}
+
+static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
+                                        struct kmem_cache *s)
+{
+}
+
+static inline struct kmem_cache *
+memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+{
+        return cachep;
+}
+
+static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
 #endif /* _LINUX_MEMCONTROL_H */
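
The __memcg_kmem_newpage_charge/commit/uncharge trio added above defines a two-step accounting protocol around a kmem page allocation: reserve the charge before allocating, commit it to the page's page_cgroup (or revert it on failure), and uncharge when the page is freed. A minimal sketch of how a caller might use it; example_kmem_alloc_pages() and example_kmem_free_pages() are hypothetical names, not call sites from this patch, and the real hooks are expected to sit in the page allocator behind __GFP_KMEMCG:

/* Illustrative only: a possible caller of the charging protocol. */
static struct page *example_kmem_alloc_pages(gfp_t gfp, int order)
{
        struct mem_cgroup *memcg = NULL;
        struct page *page;

        /* Step 1: reserve the charge; true means the allocation may proceed. */
        if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
                return NULL;

        page = alloc_pages(gfp, order);

        /*
         * Step 2: commit the charge to the new page's page_cgroup.
         * A NULL page (failed allocation) makes this revert the charge.
         */
        memcg_kmem_commit_charge(page, memcg, order);
        return page;
}

static void example_kmem_free_pages(struct page *page, int order)
{
        /* Drops the charge recorded in the page_cgroup at commit time. */
        memcg_kmem_uncharge_pages(page, order);
        __free_pages(page, order);
}

Accepting a NULL page in the commit step is what lets callers charge first and allocate second without a separate error path.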
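
memcg_kmem_get_cache() is the inline entry point a slab allocator can use on its allocation path to route a request to the per-memcg clone of a root cache. A sketch, where example_kmalloc_from() is a hypothetical wrapper and not something this patch defines:

/* Illustrative only: selecting the per-memcg cache before allocating. */
static void *example_kmalloc_from(struct kmem_cache *cachep, gfp_t gfp)
{
        /*
         * Returns either the original root cache or the clone belonging to
         * the current task's memcg. With kmemcg disabled this is only the
         * static_key test and falls through to the root cache.
         */
        cachep = memcg_kmem_get_cache(cachep, gfp);

        return kmem_cache_alloc(cachep, gfp);
}

The __always_inline plus static_key_false() pair is what backs the "not even a function call" promise in the header comment: with kmemcg off, the helper collapses to a single patched branch.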
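
for_each_memcg_cache_index() only supplies the index loop up to memcg_limited_groups_array_size; as the comment above the macro says, the caller must hold slab_mutex and tolerate NULL slots. A sketch under the assumption that the per-memcg clones are reachable through the root cache's memcg_params->memcg_caches[] array, an allocator-internal layout not defined in this header:

/* Illustrative only: walking the per-memcg clones of a root cache. */
static void example_walk_memcg_caches(struct kmem_cache *root)
{
        struct kmem_cache *c;
        int i;

        mutex_lock(&slab_mutex);        /* required while iterating */
        for_each_memcg_cache_index(i) {
                c = root->memcg_params->memcg_caches[i];  /* assumed layout */
                if (!c)         /* unpopulated slots are NULL */
                        continue;
                /* ... operate on the per-memcg cache ... */
        }
        mutex_unlock(&slab_mutex);
}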