Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--  include/linux/memcontrol.h  100
1 file changed, 43 insertions(+), 57 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0bb5f055bd26..0fa1f5de6841 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,40 +35,45 @@ struct page;
 struct mm_struct;
 struct kmem_cache;
 
-/*
- * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c,
- * These two lists should keep in accord with each other.
- */
-enum mem_cgroup_stat_index {
-	/*
-	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
-	 */
-	MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
-	MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
-	MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
-	MEM_CGROUP_STAT_SHMEM, /* # of pages charged as shmem */
-	MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
-	MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */
-	MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
-	MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
-	MEM_CGROUP_STAT_NSTATS,
-	/* default hierarchy stats */
-	MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
+/* Cgroup-specific page state, on top of universal node page state */
+enum memcg_stat_item {
+	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
+	MEMCG_RSS,
+	MEMCG_RSS_HUGE,
+	MEMCG_SWAP,
+	MEMCG_SOCK,
+	/* XXX: why are these zone and not node counters? */
+	MEMCG_KERNEL_STACK_KB,
 	MEMCG_SLAB_RECLAIMABLE,
 	MEMCG_SLAB_UNRECLAIMABLE,
-	MEMCG_SOCK,
-	MEMCG_WORKINGSET_REFAULT,
-	MEMCG_WORKINGSET_ACTIVATE,
-	MEMCG_WORKINGSET_NODERECLAIM,
 	MEMCG_NR_STAT,
 };
 
+/* Cgroup-specific events, on top of universal VM events */
+enum memcg_event_item {
+	MEMCG_LOW = NR_VM_EVENT_ITEMS,
+	MEMCG_HIGH,
+	MEMCG_MAX,
+	MEMCG_OOM,
+	MEMCG_NR_EVENTS,
+};
+
 struct mem_cgroup_reclaim_cookie {
 	pg_data_t *pgdat;
 	int priority;
 	unsigned int generation;
 };
 
+#ifdef CONFIG_MEMCG
+
+#define MEM_CGROUP_ID_SHIFT 16
+#define MEM_CGROUP_ID_MAX USHRT_MAX
+
+struct mem_cgroup_id {
+	int id;
+	atomic_t ref;
+};
+
 /*
  * Per memcg event counter is incremented at every pagein/pageout. With THP,
  * it will be incremated by the number of pages. This counter is used for
@@ -82,25 +87,6 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-#ifdef CONFIG_MEMCG
-
-#define MEM_CGROUP_ID_SHIFT 16
-#define MEM_CGROUP_ID_MAX USHRT_MAX
-
-struct mem_cgroup_id {
-	int id;
-	atomic_t ref;
-};
-
-/* Cgroup-specific events, on top of universal VM events */
-enum memcg_event_item {
-	MEMCG_LOW = NR_VM_EVENT_ITEMS,
-	MEMCG_HIGH,
-	MEMCG_MAX,
-	MEMCG_OOM,
-	MEMCG_NR_EVENTS,
-};
-
 struct mem_cgroup_stat_cpu {
 	long count[MEMCG_NR_STAT];
 	unsigned long events[MEMCG_NR_EVENTS];
@@ -487,7 +473,7 @@ void lock_page_memcg(struct page *page);
 void unlock_page_memcg(struct page *page);
 
 static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-					enum mem_cgroup_stat_index idx)
+					enum memcg_stat_item idx)
 {
 	long val = 0;
 	int cpu;
@@ -502,20 +488,20 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 }
 
 static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
-				enum mem_cgroup_stat_index idx, int val)
+				enum memcg_stat_item idx, int val)
 {
 	if (!mem_cgroup_disabled())
 		this_cpu_add(memcg->stat->count[idx], val);
 }
 
 static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
-				enum mem_cgroup_stat_index idx)
+				enum memcg_stat_item idx)
 {
 	mem_cgroup_update_stat(memcg, idx, 1);
 }
 
 static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
-				enum mem_cgroup_stat_index idx)
+				enum memcg_stat_item idx)
 {
 	mem_cgroup_update_stat(memcg, idx, -1);
 }
@@ -538,20 +524,20 @@ static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
  * Kernel pages are an exception to this, since they'll never move.
  */
 static inline void mem_cgroup_update_page_stat(struct page *page,
-				enum mem_cgroup_stat_index idx, int val)
+				enum memcg_stat_item idx, int val)
 {
 	if (page->mem_cgroup)
 		mem_cgroup_update_stat(page->mem_cgroup, idx, val);
 }
 
 static inline void mem_cgroup_inc_page_stat(struct page *page,
-					enum mem_cgroup_stat_index idx)
+					enum memcg_stat_item idx)
 {
 	mem_cgroup_update_page_stat(page, idx, 1);
 }
 
 static inline void mem_cgroup_dec_page_stat(struct page *page,
-					enum mem_cgroup_stat_index idx)
+					enum memcg_stat_item idx)
 {
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
@@ -760,33 +746,33 @@ static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 }
 
 static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
-				enum mem_cgroup_stat_index idx, int val)
+				enum memcg_stat_item idx, int val)
 {
 }
 
 static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
-				enum mem_cgroup_stat_index idx)
+				enum memcg_stat_item idx)
 {
 }
 
 static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
-				enum mem_cgroup_stat_index idx)
+				enum memcg_stat_item idx)
 {
 }
 
 static inline void mem_cgroup_update_page_stat(struct page *page,
-					enum mem_cgroup_stat_index idx,
+					enum memcg_stat_item idx,
 					int nr)
 {
 }
 
 static inline void mem_cgroup_inc_page_stat(struct page *page,
-					enum mem_cgroup_stat_index idx)
+					enum memcg_stat_item idx)
 {
 }
 
 static inline void mem_cgroup_dec_page_stat(struct page *page,
-					enum mem_cgroup_stat_index idx)
+					enum memcg_stat_item idx)
 {
 }
 
@@ -906,7 +892,7 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
  * @val: number of pages (positive or negative)
  */
 static inline void memcg_kmem_update_page_stat(struct page *page,
-				enum mem_cgroup_stat_index idx, int val)
+				enum memcg_stat_item idx, int val)
 {
 	if (memcg_kmem_enabled() && page->mem_cgroup)
 		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
@@ -935,7 +921,7 @@ static inline void memcg_put_cache_ids(void)
 }
 
 static inline void memcg_kmem_update_page_stat(struct page *page,
-				enum mem_cgroup_stat_index idx, int val)
+				enum memcg_stat_item idx, int val)
 {
 }
 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
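
Note on the interface change shown above: memcg_stat_item now begins at NR_VM_NODE_STAT_ITEMS and memcg_event_item at NR_VM_EVENT_ITEMS, so the cgroup-only counters extend the generic node and VM item ranges rather than forming a separate index space, and the accessor helpers simply take the widened enum. A minimal sketch of how a caller might bump one of these counters through the helpers declared in this header follows; the function name and calling context are illustrative assumptions, not part of the patch.

/*
 * Illustrative only: charge one page-cache page to its memcg using the
 * helpers from this header. example_account_cache_page() is a made-up
 * name; the real call sites live elsewhere in mm/.
 */
static void example_account_cache_page(struct page *page)
{
	lock_page_memcg(page);		/* stabilize page->mem_cgroup */
	/*
	 * MEMCG_CACHE starts at NR_VM_NODE_STAT_ITEMS, so node stats and
	 * cgroup-only stats share one index space.
	 */
	mem_cgroup_inc_page_stat(page, MEMCG_CACHE);
	unlock_page_memcg(page);
}

Here mem_cgroup_inc_page_stat() resolves to mem_cgroup_update_stat() on page->mem_cgroup, which is a this_cpu_add() into the per-cpu count[] array sized by MEMCG_NR_STAT, as shown in the hunks above; with CONFIG_MEMCG disabled the same calls compile to the empty stubs.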
