Diffstat (limited to 'include/linux/memcontrol.h')
 include/linux/memcontrol.h | 164 +++++++++++++++----------------
 1 file changed, 53 insertions(+), 111 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ad800e62cb7a..cd0e2413c358 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -213,6 +213,9 @@ struct mem_cgroup {
         /* OOM-Killer disable */
         int oom_kill_disable;
 
+        /* handle for "memory.events" */
+        struct cgroup_file events_file;
+
         /* protect arrays of thresholds */
         struct mutex thresholds_lock;
 
@@ -242,7 +245,6 @@ struct mem_cgroup {
          * percpu counter.
          */
         struct mem_cgroup_stat_cpu __percpu *stat;
-        spinlock_t pcp_counter_lock;
 
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
         struct cg_proto tcp_mem;
@@ -286,6 +288,7 @@ static inline void mem_cgroup_events(struct mem_cgroup *memcg,
                                      unsigned int nr)
 {
         this_cpu_add(memcg->stat->events[idx], nr);
+        cgroup_file_notify(&memcg->events_file);
 }
 
 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
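
The two hunks above work together: the new events_file handle lets
mem_cgroup_events() kick anyone watching the cgroup2 "memory.events" file
whenever an event counter is bumped (cgroup_file_notify() ends up in
kernfs_notify(), which signals pollers with POLLPRI). A minimal userspace
watcher sketch; the mount point and the "test" group are assumptions for
illustration, not part of this patch:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* assumed paths: cgroup2 at /sys/fs/cgroup, child group "test" */
        int fd = open("/sys/fs/cgroup/test/memory.events", O_RDONLY);
        struct pollfd pfd = { .fd = fd, .events = POLLPRI };
        char buf[256];
        ssize_t n;

        if (fd < 0)
                return 1;
        while (poll(&pfd, 1, -1) > 0) {
                /* woken via cgroup_file_notify() in the hunk above */
                n = pread(fd, buf, sizeof(buf) - 1, 0);
                if (n <= 0)
                        break;
                buf[n] = '\0';
                fputs(buf, stdout);     /* the event counters */
        }
        close(fd);
        return 0;
}
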
@@ -298,8 +301,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
 void mem_cgroup_uncharge(struct page *page);
 void mem_cgroup_uncharge_list(struct list_head *page_list);
 
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
-                        bool lrucare);
+void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage);
 
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
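
The bool lrucare parameter is dropped along with the rename: the surviving
caller replaces a page that has not been exposed to the LRU yet, so the
careful variant is unnecessary. A hedged sketch of the kind of call site
this targets (loosely modeled on replace_page_cache_page() in mm/filemap.c;
locking and the actual page-cache swap are elided):

static void replace_cache_page(struct page *old, struct page *new)
{
        /* ... swap @new in for @old in the page cache ... */

        /* transfer the memcg charge; @new is not on the LRU yet */
        mem_cgroup_replace_page(old, new);
}
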
@@ -347,9 +349,7 @@ ino_t page_cgroup_ino(struct page *page);
 
 static inline bool mem_cgroup_disabled(void)
 {
-        if (memory_cgrp_subsys.disabled)
-                return true;
-        return false;
+        return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
 /*
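
mem_cgroup_disabled() no longer loads a flag from the subsystem descriptor:
cgroup_subsys_enabled() is a static-key test, so the check compiles down to
a patchable jump rather than a memory load. For reference, the helper is
defined along these lines in include/linux/cgroup.h (paraphrased, not part
of this diff):

#define cgroup_subsys_enabled(ss)                               \
        static_branch_likely(&ss ## _enabled_key)
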
@@ -383,7 +383,7 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
         return mz->lru_size[lru];
 }
 
-static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
+static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 {
         unsigned long inactive_ratio;
         unsigned long inactive;
@@ -402,24 +402,26 @@ static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
         return inactive * inactive_ratio < active;
 }
 
+void mem_cgroup_handle_over_high(void);
+
 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                struct task_struct *p);
 
 static inline void mem_cgroup_oom_enable(void)
 {
-        WARN_ON(current->memcg_oom.may_oom);
-        current->memcg_oom.may_oom = 1;
+        WARN_ON(current->memcg_may_oom);
+        current->memcg_may_oom = 1;
 }
 
 static inline void mem_cgroup_oom_disable(void)
 {
-        WARN_ON(!current->memcg_oom.may_oom);
-        current->memcg_oom.may_oom = 0;
+        WARN_ON(!current->memcg_may_oom);
+        current->memcg_may_oom = 0;
 }
 
 static inline bool task_in_memcg_oom(struct task_struct *p)
 {
-        return p->memcg_oom.memcg;
+        return p->memcg_in_oom;
 }
 
 bool mem_cgroup_oom_synchronize(bool wait);
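
Two changes meet in this hunk: a declaration for
mem_cgroup_handle_over_high(), the slow path that reclaims the excess once a
task has pushed its memcg past memory.high, and the flattening of the old
task_struct::memcg_oom sub-struct into plain members. Roughly the matching
task_struct members in this series (a sketch of include/linux/sched.h,
abbreviated; member placement is approximate):

struct task_struct {
        /* ... */
#ifdef CONFIG_MEMCG
        unsigned                memcg_may_oom:1;
        struct mem_cgroup       *memcg_in_oom;
        gfp_t                   memcg_oom_gfp_mask;
        int                     memcg_oom_order;

        /* number of pages to reclaim on returning to userland */
        unsigned int            memcg_nr_pages_over_high;
#endif
        /* ... */
};
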
@@ -536,9 +538,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
 {
 }
 
-static inline void mem_cgroup_migrate(struct page *oldpage,
-                                      struct page *newpage,
-                                      bool lrucare)
+static inline void mem_cgroup_replace_page(struct page *old, struct page *new)
 {
 }
 
@@ -584,10 +584,10 @@ static inline bool mem_cgroup_disabled(void)
         return true;
 }
 
-static inline int
+static inline bool
 mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
 {
-        return 1;
+        return true;
 }
 
 static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
@@ -621,6 +621,10 @@ static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
 {
 }
 
+static inline void mem_cgroup_handle_over_high(void)
+{
+}
+
 static inline void mem_cgroup_oom_enable(void)
 {
 }
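
The empty !CONFIG_MEMCG stub above exists because the hook is called
unconditionally on the return-to-userspace path. In this series the call
site is tracehook_notify_resume(); an abbreviated sketch:

/* include/linux/tracehook.h, abbreviated */
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
        if (unlikely(current->task_works))
                task_work_run();

        /* reclaim pages over memory.high before re-entering userspace */
        mem_cgroup_handle_over_high();
}
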
@@ -677,8 +681,9 @@ enum {
 
 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
-void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
-                         unsigned long *pdirty, unsigned long *pwriteback);
+void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
+                         unsigned long *pheadroom, unsigned long *pdirty,
+                         unsigned long *pwriteback);
 
 #else   /* CONFIG_CGROUP_WRITEBACK */
 
@@ -688,7 +693,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
 }
 
 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
-                                       unsigned long *pavail,
+                                       unsigned long *pfilepages,
+                                       unsigned long *pheadroom,
                                        unsigned long *pdirty,
                                        unsigned long *pwriteback)
 {
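
Instead of one opaque "available" figure, writeback now receives the
file-page count and the growth headroom separately, letting the dirty
throttling code derive the memcg writeback domain's limits itself. A hedged
sketch of a consumer, loosely modeled on the balance_dirty_pages() usage in
mm/page-writeback.c (the function name here is illustrative):

static unsigned long wb_memcg_avail(struct bdi_writeback *wb,
                                    unsigned long *pdirty,
                                    unsigned long *pwriteback)
{
        unsigned long filepages, headroom;

        mem_cgroup_wb_stats(wb, &filepages, &headroom, pdirty, pwriteback);

        /* pages this memcg's page cache could plausibly grow to */
        return filepages + headroom;
}
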
@@ -745,11 +751,10 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
  * conditions, but because they are pretty simple, they are expected to be
  * fast.
  */
-bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
-                                        int order);
-void __memcg_kmem_commit_charge(struct page *page,
-                                       struct mem_cgroup *memcg, int order);
-void __memcg_kmem_uncharge_pages(struct page *page, int order);
+int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+                              struct mem_cgroup *memcg);
+int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
+void __memcg_kmem_uncharge(struct page *page, int order);
 
 /*
  * helper for acessing a memcg's index. It will be used as an index in the
@@ -764,77 +769,42 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 
-struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
-
-int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
-                      unsigned long nr_pages);
-void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
-
-/**
- * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
- * @gfp: the gfp allocation flags.
- * @memcg: a pointer to the memcg this was charged against.
- * @order: allocation order.
- *
- * returns true if the memcg where the current task belongs can hold this
- * allocation.
- *
- * We return true automatically if this allocation is not to be accounted to
- * any memcg.
- */
-static inline bool
-memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+static inline bool __memcg_kmem_bypass(gfp_t gfp)
 {
         if (!memcg_kmem_enabled())
                 return true;
-
         if (gfp & __GFP_NOACCOUNT)
                 return true;
-        /*
-         * __GFP_NOFAIL allocations will move on even if charging is not
-         * possible. Therefore we don't even try, and have this allocation
-         * unaccounted. We could in theory charge it forcibly, but we hope
-         * those allocations are rare, and won't be worth the trouble.
-         */
-        if (gfp & __GFP_NOFAIL)
-                return true;
         if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                 return true;
-
-        /* If the test is dying, just let it go. */
-        if (unlikely(fatal_signal_pending(current)))
-                return true;
-
-        return __memcg_kmem_newpage_charge(gfp, memcg, order);
+        return false;
 }
 
 /**
- * memcg_kmem_uncharge_pages: uncharge pages from memcg
- * @page: pointer to struct page being freed
- * @order: allocation order.
+ * memcg_kmem_charge: charge a kmem page
+ * @page: page to charge
+ * @gfp: reclaim mode
+ * @order: allocation order
+ *
+ * Returns 0 on success, an error code on failure.
  */
-static inline void
-memcg_kmem_uncharge_pages(struct page *page, int order)
+static __always_inline int memcg_kmem_charge(struct page *page,
+                                             gfp_t gfp, int order)
 {
-        if (memcg_kmem_enabled())
-                __memcg_kmem_uncharge_pages(page, order);
+        if (__memcg_kmem_bypass(gfp))
+                return 0;
+        return __memcg_kmem_charge(page, gfp, order);
 }
 
 /**
- * memcg_kmem_commit_charge: embeds correct memcg in a page
- * @page: pointer to struct page recently allocated
- * @memcg: the memcg structure we charged against
- * @order: allocation order.
- *
- * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
- * failure of the allocation. if @page is NULL, this function will revert the
- * charges. Otherwise, it will commit @page to @memcg.
+ * memcg_kmem_uncharge: uncharge a kmem page
+ * @page: page to uncharge
+ * @order: allocation order
  */
-static inline void
-memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
 {
-        if (memcg_kmem_enabled() && memcg)
-                __memcg_kmem_commit_charge(page, memcg, order);
+        if (memcg_kmem_enabled())
+                __memcg_kmem_uncharge(page, order);
 }
 
 /**
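
The old three-step protocol (newpage_charge, allocate, then commit or
revert) collapses into one try-charge issued after the page exists, plus a
matching uncharge at free. A sketch of the allocator-side pairing, modeled
on alloc_kmem_pages()/free_kmem_pages() in mm/page_alloc.c of this series
(abbreviated; debug checks elided):

struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *page;

        page = alloc_pages(gfp_mask, order);
        /* back the page out if the memcg is at its limit */
        if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
                __free_pages(page, order);
                page = NULL;
        }
        return page;
}

void free_kmem_pages(unsigned long addr, unsigned int order)
{
        if (addr != 0) {
                memcg_kmem_uncharge(virt_to_page((void *)addr), order);
                __free_pages(virt_to_page((void *)addr), order);
        }
}
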
@@ -847,17 +817,8 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
 static __always_inline struct kmem_cache *
 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-        if (!memcg_kmem_enabled())
-                return cachep;
-        if (gfp & __GFP_NOACCOUNT)
-                return cachep;
-        if (gfp & __GFP_NOFAIL)
-                return cachep;
-        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
-                return cachep;
-        if (unlikely(fatal_signal_pending(current)))
-                return cachep;
-
+        if (__memcg_kmem_bypass(gfp))
+                return cachep;
         return __memcg_kmem_get_cache(cachep);
 }
 
@@ -866,13 +827,6 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
         if (memcg_kmem_enabled())
                 __memcg_kmem_put_cache(cachep);
 }
-
-static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
-{
-        if (!memcg_kmem_enabled())
-                return NULL;
-        return __mem_cgroup_from_kmem(ptr);
-}
 #else
 #define for_each_memcg_cache_index(_idx)        \
         for (; NULL; )
@@ -887,18 +841,12 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
         return false;
 }
 
-static inline bool
-memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
 {
-        return true;
+        return 0;
 }
 
-static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
-{
-}
-
-static inline void
-memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+static inline void memcg_kmem_uncharge(struct page *page, int order)
 {
 }
 
@@ -924,11 +872,5 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
 }
-
-static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
-{
-        return NULL;
-}
 #endif /* CONFIG_MEMCG_KMEM */
 #endif /* _LINUX_MEMCONTROL_H */
-