Diffstat (limited to 'include/linux/page_cgroup.h')
-rw-r--r--	include/linux/page_cgroup.h	90
1 file changed, 58 insertions(+), 32 deletions(-)
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 6d6cb7a57bb3..961ecc7d30bc 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -1,8 +1,26 @@
 #ifndef __LINUX_PAGE_CGROUP_H
 #define __LINUX_PAGE_CGROUP_H
 
+enum {
+	/* flags for mem_cgroup */
+	PCG_LOCK, /* Lock for pc->mem_cgroup and following bits. */
+	PCG_CACHE, /* charged as cache */
+	PCG_USED, /* this object is in use. */
+	PCG_MIGRATION, /* under page migration */
+	/* flags for mem_cgroup and file and I/O status */
+	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
+	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
+	/* No lock in page_cgroup */
+	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
+	__NR_PCG_FLAGS,
+};
+
+#ifndef __GENERATING_BOUNDS_H
+#include <generated/bounds.h>
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 #include <linux/bit_spinlock.h>
+
 /*
  * Page Cgroup can be considered as an extended mem_map.
  * A page_cgroup page is associated with every page descriptor. The
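Note: the flag enum now sits above the __GENERATING_BOUNDS_H guard so the bounds machinery can see __NR_PCG_FLAGS and emit NR_PCG_FLAGS into <generated/bounds.h>, which the PCG_ARRAYID_WIDTH space check further down consumes at preprocessing time. The matching kernel/bounds.c change is not part of this file's diff; the sketch below only illustrates the usual pattern (the DEFINE() helper comes from include/linux/kbuild.h).

/* Sketch of a kernel/bounds.c entry; not taken from this diff. */
#define __GENERATING_BOUNDS_H
#include <linux/page_cgroup.h>
#include <linux/kbuild.h>

void foo(void)
{
	/* Exported so <generated/bounds.h> can provide NR_PCG_FLAGS. */
	DEFINE(NR_PCG_FLAGS, __NR_PCG_FLAGS);
}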
@@ -13,7 +31,6 @@
 struct page_cgroup {
 	unsigned long flags;
 	struct mem_cgroup *mem_cgroup;
-	struct page *page;
 	struct list_head lru;		/* per cgroup LRU list */
 };
 
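Dropping the back-pointer shrinks struct page_cgroup by one pointer for every page in the system. As rough arithmetic (not stated in this diff): on 64-bit with 4 KiB pages that is 8 bytes per 4096-byte page, i.e. roughly 2 MiB of page_cgroup metadata per GiB of RAM, recovered by encoding the owning array's ID into spare high bits of pc->flags instead (see the PCG_ARRAYID_* hunk below) and recomputing the page on demand via the new lookup_cgroup_page().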
@@ -32,19 +49,7 @@ static inline void __init page_cgroup_init(void)
 #endif
 
 struct page_cgroup *lookup_page_cgroup(struct page *page);
-
-enum {
-	/* flags for mem_cgroup */
-	PCG_LOCK, /* Lock for pc->mem_cgroup and following bits. */
-	PCG_CACHE, /* charged as cache */
-	PCG_USED, /* this object is in use. */
-	PCG_MIGRATION, /* under page migration */
-	/* flags for mem_cgroup and file and I/O status */
-	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
-	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
-	/* No lock in page_cgroup */
-	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
-};
+struct page *lookup_cgroup_page(struct page_cgroup *pc);
 
 #define TESTPCGFLAG(uname, lname)			\
 static inline int PageCgroup##uname(struct page_cgroup *pc)	\
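The new lookup_cgroup_page() declaration replaces the removed pc->page field: instead of storing a back-pointer, the owning array's ID (packed into pc->flags, see below) plus the entry's offset within that array are enough to recompute the struct page. Its definition lives in mm/page_cgroup.c and is not shown here; the following is only a sketch of the SPARSEMEM flavor, assuming section->page_cgroup is biased by the section's first pfn so that indexing it by an absolute pfn yields the matching entry (the same layout lookup_page_cgroup() relies on).

/* Sketch only; not the mm/page_cgroup.c hunk itself. */
struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	struct mem_section *section;

	/* The array ID stored in pc->flags names the memory section. */
	section = __nr_to_section(page_cgroup_array_id(pc));
	/* With a pfn-biased base pointer, the difference is the pfn itself. */
	return pfn_to_page(pc - section->page_cgroup);
}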
@@ -85,16 +90,6 @@ SETPCGFLAG(Migration, MIGRATION)
 CLEARPCGFLAG(Migration, MIGRATION)
 TESTPCGFLAG(Migration, MIGRATION)
 
-static inline int page_cgroup_nid(struct page_cgroup *pc)
-{
-	return page_to_nid(pc->page);
-}
-
-static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
-{
-	return page_zonenum(pc->page);
-}
-
 static inline void lock_page_cgroup(struct page_cgroup *pc)
 {
 	/*
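The removed page_cgroup_nid()/page_cgroup_zid() helpers were the last users of pc->page in this header. With the pointer gone, the same information is still reachable through the reverse lookup; whether callers do that or simply keep the struct page they already have is decided in the .c files, which are outside this diff. A hypothetical drop-in would look like this:

/* Hypothetical replacement; not part of this diff. */
static inline int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(lookup_cgroup_page(pc));
}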
@@ -109,11 +104,6 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc)
 	bit_spin_unlock(PCG_LOCK, &pc->flags);
 }
 
-static inline int page_is_cgroup_locked(struct page_cgroup *pc)
-{
-	return bit_spin_is_locked(PCG_LOCK, &pc->flags);
-}
-
 static inline void move_lock_page_cgroup(struct page_cgroup *pc,
 					 unsigned long *flags)
 {
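The surviving context shows the two-level locking scheme these hunks leave in place: lock_page_cgroup() takes a bit spinlock on PCG_LOCK to protect pc->mem_cgroup and the charge state, while move_lock_page_cgroup() takes PCG_MOVE_LOCK with IRQs disabled to keep the statistics bits consistent against move_account. Purely as an illustration (assuming the usual SETPCGFLAG()/CLEARPCGFLAG() instantiations for FILE_MAPPED elsewhere in this header), a stat updater would bracket the bit flip like this:

/* Illustrative only; real memcg callers add fast-path checks. */
static void pc_update_file_mapped(struct page_cgroup *pc, int val)
{
	unsigned long flags;

	move_lock_page_cgroup(pc, &flags);
	if (val > 0)
		SetPageCgroupFileMapped(pc);
	else
		ClearPageCgroupFileMapped(pc);
	move_unlock_page_cgroup(pc, &flags);
}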
@@ -132,6 +122,39 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
 	local_irq_restore(*flags);
 }
 
+#ifdef CONFIG_SPARSEMEM
+#define PCG_ARRAYID_WIDTH	SECTIONS_SHIFT
+#else
+#define PCG_ARRAYID_WIDTH	NODES_SHIFT
+#endif
+
+#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
+#error Not enough space left in pc->flags to store page_cgroup array IDs
+#endif
+
+/* pc->flags: ARRAY-ID | FLAGS */
+
+#define PCG_ARRAYID_MASK	((1UL << PCG_ARRAYID_WIDTH) - 1)
+
+#define PCG_ARRAYID_OFFSET	(BITS_PER_LONG - PCG_ARRAYID_WIDTH)
+/*
+ * Zero the shift count for non-existent fields, to prevent compiler
+ * warnings and ensure references are optimized away.
+ */
+#define PCG_ARRAYID_SHIFT	(PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))
+
+static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
+					    unsigned long id)
+{
+	pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
+	pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
+}
+
+static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
+{
+	return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
+}
+
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct page_cgroup;
 
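The new PCG_ARRAYID_* block packs the array ID into the top PCG_ARRAYID_WIDTH bits of pc->flags (layout: ARRAY-ID | FLAGS), leaving the low bits for the PCG_* flags; the #if guard fails the build if the two would overlap. The standalone program below (names local to the sketch, with an arbitrary width of 8 standing in for SECTIONS_SHIFT/NODES_SHIFT) demonstrates the same store/load round-trip on a plain unsigned long:

/* Standalone demo of the bit layout; build with any C compiler. */
#include <assert.h>
#include <stdio.h>

#define DEMO_BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define DEMO_ARRAYID_WIDTH	8
#define DEMO_ARRAYID_MASK	((1UL << DEMO_ARRAYID_WIDTH) - 1)
#define DEMO_ARRAYID_SHIFT	(DEMO_BITS_PER_LONG - DEMO_ARRAYID_WIDTH)

int main(void)
{
	unsigned long flags = 1UL << 0;		/* pretend PCG_LOCK is set */

	/* set_page_cgroup_array_id() equivalent */
	flags &= ~(DEMO_ARRAYID_MASK << DEMO_ARRAYID_SHIFT);
	flags |= (0xabUL & DEMO_ARRAYID_MASK) << DEMO_ARRAYID_SHIFT;

	/* page_cgroup_array_id() equivalent */
	assert(((flags >> DEMO_ARRAYID_SHIFT) & DEMO_ARRAYID_MASK) == 0xab);
	assert(flags & 1);			/* flag bits untouched */
	printf("flags = %#lx\n", flags);
	return 0;
}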
@@ -152,7 +175,7 @@ static inline void __init page_cgroup_init_flatmem(void)
 {
 }
 
-#endif
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
 
 #include <linux/swap.h>
 
@@ -188,5 +211,8 @@ static inline void swap_cgroup_swapoff(int type)
 	return;
 }
 
-#endif
-#endif
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
+
+#endif /* !__GENERATING_BOUNDS_H */
+
+#endif /* __LINUX_PAGE_CGROUP_H */