author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) | |
tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /include/linux/memcontrol.h | |
parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) |
Patched in Tegra support.
Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r-- | include/linux/memcontrol.h | 453 |
1 file changed, 106 insertions, 347 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0108a56f814..b9c1c06cd7a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,27 +21,25 @@ | |||
21 | #define _LINUX_MEMCONTROL_H | 21 | #define _LINUX_MEMCONTROL_H |
22 | #include <linux/cgroup.h> | 22 | #include <linux/cgroup.h> |
23 | #include <linux/vm_event_item.h> | 23 | #include <linux/vm_event_item.h> |
24 | #include <linux/hardirq.h> | ||
25 | #include <linux/jump_label.h> | ||
26 | 24 | ||
27 | struct mem_cgroup; | 25 | struct mem_cgroup; |
28 | struct page_cgroup; | 26 | struct page_cgroup; |
29 | struct page; | 27 | struct page; |
30 | struct mm_struct; | 28 | struct mm_struct; |
31 | struct kmem_cache; | ||
32 | 29 | ||
33 | /* Stats that can be updated by kernel. */ | 30 | /* Stats that can be updated by kernel. */ |
34 | enum mem_cgroup_page_stat_item { | 31 | enum mem_cgroup_page_stat_item { |
35 | MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */ | 32 | MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */ |
36 | }; | 33 | }; |
37 | 34 | ||
38 | struct mem_cgroup_reclaim_cookie { | 35 | extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, |
39 | struct zone *zone; | 36 | struct list_head *dst, |
40 | int priority; | 37 | unsigned long *scanned, int order, |
41 | unsigned int generation; | 38 | int mode, struct zone *z, |
42 | }; | 39 | struct mem_cgroup *mem_cont, |
40 | int active, int file); | ||
43 | 41 | ||
44 | #ifdef CONFIG_MEMCG | 42 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
45 | /* | 43 | /* |
46 | * All "charge" functions with gfp_mask should use GFP_KERNEL or | 44 | * All "charge" functions with gfp_mask should use GFP_KERNEL or |
47 | * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't | 45 | * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't |
@@ -57,16 +55,20 @@ extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, | |||
57 | gfp_t gfp_mask); | 55 | gfp_t gfp_mask); |
58 | /* for swap handling */ | 56 | /* for swap handling */ |
59 | extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm, | 57 | extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm, |
60 | struct page *page, gfp_t mask, struct mem_cgroup **memcgp); | 58 | struct page *page, gfp_t mask, struct mem_cgroup **ptr); |
61 | extern void mem_cgroup_commit_charge_swapin(struct page *page, | 59 | extern void mem_cgroup_commit_charge_swapin(struct page *page, |
62 | struct mem_cgroup *memcg); | 60 | struct mem_cgroup *ptr); |
63 | extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg); | 61 | extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr); |
64 | 62 | ||
65 | extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | 63 | extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, |
66 | gfp_t gfp_mask); | 64 | gfp_t gfp_mask); |
67 | 65 | extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru); | |
68 | struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); | 66 | extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru); |
69 | struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); | 67 | extern void mem_cgroup_rotate_reclaimable_page(struct page *page); |
68 | extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru); | ||
69 | extern void mem_cgroup_del_lru(struct page *page); | ||
70 | extern void mem_cgroup_move_lists(struct page *page, | ||
71 | enum lru_list from, enum lru_list to); | ||
70 | 72 | ||
71 | /* For coalescing uncharge for reducing memcg' overhead*/ | 73 | /* For coalescing uncharge for reducing memcg' overhead*/ |
72 | extern void mem_cgroup_uncharge_start(void); | 74 | extern void mem_cgroup_uncharge_start(void); |
@@ -75,57 +77,49 @@ extern void mem_cgroup_uncharge_end(void); | |||
75 | extern void mem_cgroup_uncharge_page(struct page *page); | 77 | extern void mem_cgroup_uncharge_page(struct page *page); |
76 | extern void mem_cgroup_uncharge_cache_page(struct page *page); | 78 | extern void mem_cgroup_uncharge_cache_page(struct page *page); |
77 | 79 | ||
78 | bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, | 80 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); |
79 | struct mem_cgroup *memcg); | 81 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); |
80 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg); | ||
81 | 82 | ||
82 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); | 83 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); |
83 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | 84 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); |
84 | extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); | 85 | extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); |
85 | 86 | ||
86 | extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); | ||
87 | extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont); | ||
88 | |||
89 | static inline | 87 | static inline |
90 | bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) | 88 | int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) |
91 | { | 89 | { |
92 | struct mem_cgroup *task_memcg; | 90 | struct mem_cgroup *mem; |
93 | bool match; | ||
94 | |||
95 | rcu_read_lock(); | 91 | rcu_read_lock(); |
96 | task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); | 92 | mem = mem_cgroup_from_task(rcu_dereference((mm)->owner)); |
97 | match = __mem_cgroup_same_or_subtree(memcg, task_memcg); | ||
98 | rcu_read_unlock(); | 93 | rcu_read_unlock(); |
99 | return match; | 94 | return cgroup == mem; |
100 | } | 95 | } |
101 | 96 | ||
102 | extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); | 97 | extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem); |
103 | 98 | ||
104 | extern void | 99 | extern int |
105 | mem_cgroup_prepare_migration(struct page *page, struct page *newpage, | 100 | mem_cgroup_prepare_migration(struct page *page, |
106 | struct mem_cgroup **memcgp); | 101 | struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask); |
107 | extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, | 102 | extern void mem_cgroup_end_migration(struct mem_cgroup *mem, |
108 | struct page *oldpage, struct page *newpage, bool migration_ok); | 103 | struct page *oldpage, struct page *newpage, bool migration_ok); |
109 | 104 | ||
110 | struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, | ||
111 | struct mem_cgroup *, | ||
112 | struct mem_cgroup_reclaim_cookie *); | ||
113 | void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); | ||
114 | |||
115 | /* | 105 | /* |
116 | * For memory reclaim. | 106 | * For memory reclaim. |
117 | */ | 107 | */ |
118 | int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec); | 108 | int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); |
119 | int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec); | 109 | int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg); |
120 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); | 110 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); |
121 | unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list); | 111 | unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, |
122 | void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int); | 112 | int nid, int zid, unsigned int lrumask); |
113 | struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, | ||
114 | struct zone *zone); | ||
115 | struct zone_reclaim_stat* | ||
116 | mem_cgroup_get_reclaim_stat_from_page(struct page *page); | ||
123 | extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, | 117 | extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, |
124 | struct task_struct *p); | 118 | struct task_struct *p); |
125 | extern void mem_cgroup_replace_page_cache(struct page *oldpage, | 119 | extern void mem_cgroup_replace_page_cache(struct page *oldpage, |
126 | struct page *newpage); | 120 | struct page *newpage); |
127 | 121 | ||
128 | #ifdef CONFIG_MEMCG_SWAP | 122 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
129 | extern int do_swap_account; | 123 | extern int do_swap_account; |
130 | #endif | 124 | #endif |
131 | 125 | ||
@@ -136,34 +130,6 @@ static inline bool mem_cgroup_disabled(void) | |||
136 | return false; | 130 | return false; |
137 | } | 131 | } |
138 | 132 | ||
139 | void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked, | ||
140 | unsigned long *flags); | ||
141 | |||
142 | extern atomic_t memcg_moving; | ||
143 | |||
144 | static inline void mem_cgroup_begin_update_page_stat(struct page *page, | ||
145 | bool *locked, unsigned long *flags) | ||
146 | { | ||
147 | if (mem_cgroup_disabled()) | ||
148 | return; | ||
149 | rcu_read_lock(); | ||
150 | *locked = false; | ||
151 | if (atomic_read(&memcg_moving)) | ||
152 | __mem_cgroup_begin_update_page_stat(page, locked, flags); | ||
153 | } | ||
154 | |||
155 | void __mem_cgroup_end_update_page_stat(struct page *page, | ||
156 | unsigned long *flags); | ||
157 | static inline void mem_cgroup_end_update_page_stat(struct page *page, | ||
158 | bool *locked, unsigned long *flags) | ||
159 | { | ||
160 | if (mem_cgroup_disabled()) | ||
161 | return; | ||
162 | if (*locked) | ||
163 | __mem_cgroup_end_update_page_stat(page, flags); | ||
164 | rcu_read_unlock(); | ||
165 | } | ||
166 | |||
167 | void mem_cgroup_update_page_stat(struct page *page, | 133 | void mem_cgroup_update_page_stat(struct page *page, |
168 | enum mem_cgroup_page_stat_item idx, | 134 | enum mem_cgroup_page_stat_item idx, |
169 | int val); | 135 | int val); |
@@ -183,24 +149,18 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, | |||
183 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | 149 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, |
184 | gfp_t gfp_mask, | 150 | gfp_t gfp_mask, |
185 | unsigned long *total_scanned); | 151 | unsigned long *total_scanned); |
152 | u64 mem_cgroup_get_limit(struct mem_cgroup *mem); | ||
186 | 153 | ||
187 | void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); | 154 | void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); |
188 | static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, | ||
189 | enum vm_event_item idx) | ||
190 | { | ||
191 | if (mem_cgroup_disabled()) | ||
192 | return; | ||
193 | __mem_cgroup_count_vm_event(mm, idx); | ||
194 | } | ||
195 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 155 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
196 | void mem_cgroup_split_huge_fixup(struct page *head); | 156 | void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail); |
197 | #endif | 157 | #endif |
198 | 158 | ||
199 | #ifdef CONFIG_DEBUG_VM | 159 | #ifdef CONFIG_DEBUG_VM |
200 | bool mem_cgroup_bad_page_check(struct page *page); | 160 | bool mem_cgroup_bad_page_check(struct page *page); |
201 | void mem_cgroup_print_bad_page(struct page *page); | 161 | void mem_cgroup_print_bad_page(struct page *page); |
202 | #endif | 162 | #endif |
203 | #else /* CONFIG_MEMCG */ | 163 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ |
204 | struct mem_cgroup; | 164 | struct mem_cgroup; |
205 | 165 | ||
206 | static inline int mem_cgroup_newpage_charge(struct page *page, | 166 | static inline int mem_cgroup_newpage_charge(struct page *page, |
@@ -216,17 +176,17 @@ static inline int mem_cgroup_cache_charge(struct page *page, | |||
216 | } | 176 | } |
217 | 177 | ||
218 | static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm, | 178 | static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm, |
219 | struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp) | 179 | struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr) |
220 | { | 180 | { |
221 | return 0; | 181 | return 0; |
222 | } | 182 | } |
223 | 183 | ||
224 | static inline void mem_cgroup_commit_charge_swapin(struct page *page, | 184 | static inline void mem_cgroup_commit_charge_swapin(struct page *page, |
225 | struct mem_cgroup *memcg) | 185 | struct mem_cgroup *ptr) |
226 | { | 186 | { |
227 | } | 187 | } |
228 | 188 | ||
229 | static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg) | 189 | static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr) |
230 | { | 190 | { |
231 | } | 191 | } |
232 | 192 | ||
@@ -246,16 +206,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page) | |||
246 | { | 206 | { |
247 | } | 207 | } |
248 | 208 | ||
249 | static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, | 209 | static inline void mem_cgroup_add_lru_list(struct page *page, int lru) |
250 | struct mem_cgroup *memcg) | ||
251 | { | 210 | { |
252 | return &zone->lruvec; | ||
253 | } | 211 | } |
254 | 212 | ||
255 | static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page, | 213 | static inline void mem_cgroup_del_lru_list(struct page *page, int lru) |
256 | struct zone *zone) | 214 | { |
215 | return ; | ||
216 | } | ||
217 | |||
218 | static inline void mem_cgroup_rotate_reclaimable_page(struct page *page) | ||
219 | { | ||
220 | return ; | ||
221 | } | ||
222 | |||
223 | static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru) | ||
224 | { | ||
225 | return ; | ||
226 | } | ||
227 | |||
228 | static inline void mem_cgroup_del_lru(struct page *page) | ||
229 | { | ||
230 | return ; | ||
231 | } | ||
232 | |||
233 | static inline void | ||
234 | mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) | ||
257 | { | 235 | { |
258 | return &zone->lruvec; | ||
259 | } | 236 | } |
260 | 237 | ||
261 | static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) | 238 | static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) |
@@ -268,45 +245,46 @@ static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm | |||
268 | return NULL; | 245 | return NULL; |
269 | } | 246 | } |
270 | 247 | ||
271 | static inline bool mm_match_cgroup(struct mm_struct *mm, | 248 | static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem) |
272 | struct mem_cgroup *memcg) | ||
273 | { | 249 | { |
274 | return true; | 250 | return 1; |
275 | } | 251 | } |
276 | 252 | ||
277 | static inline int task_in_mem_cgroup(struct task_struct *task, | 253 | static inline int task_in_mem_cgroup(struct task_struct *task, |
278 | const struct mem_cgroup *memcg) | 254 | const struct mem_cgroup *mem) |
279 | { | 255 | { |
280 | return 1; | 256 | return 1; |
281 | } | 257 | } |
282 | 258 | ||
283 | static inline struct cgroup_subsys_state | 259 | static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) |
284 | *mem_cgroup_css(struct mem_cgroup *memcg) | ||
285 | { | 260 | { |
286 | return NULL; | 261 | return NULL; |
287 | } | 262 | } |
288 | 263 | ||
289 | static inline void | 264 | static inline int |
290 | mem_cgroup_prepare_migration(struct page *page, struct page *newpage, | 265 | mem_cgroup_prepare_migration(struct page *page, struct page *newpage, |
291 | struct mem_cgroup **memcgp) | 266 | struct mem_cgroup **ptr, gfp_t gfp_mask) |
292 | { | 267 | { |
268 | return 0; | ||
293 | } | 269 | } |
294 | 270 | ||
295 | static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg, | 271 | static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, |
296 | struct page *oldpage, struct page *newpage, bool migration_ok) | 272 | struct page *oldpage, struct page *newpage, bool migration_ok) |
297 | { | 273 | { |
298 | } | 274 | } |
299 | 275 | ||
300 | static inline struct mem_cgroup * | 276 | static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) |
301 | mem_cgroup_iter(struct mem_cgroup *root, | 277 | { |
302 | struct mem_cgroup *prev, | 278 | return 0; |
303 | struct mem_cgroup_reclaim_cookie *reclaim) | 279 | } |
280 | |||
281 | static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, | ||
282 | int priority) | ||
304 | { | 283 | { |
305 | return NULL; | ||
306 | } | 284 | } |
307 | 285 | ||
308 | static inline void mem_cgroup_iter_break(struct mem_cgroup *root, | 286 | static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, |
309 | struct mem_cgroup *prev) | 287 | int priority) |
310 | { | 288 | { |
311 | } | 289 | } |
312 | 290 | ||
@@ -316,41 +294,39 @@ static inline bool mem_cgroup_disabled(void) | |||
316 | } | 294 | } |
317 | 295 | ||
318 | static inline int | 296 | static inline int |
319 | mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) | 297 | mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) |
320 | { | 298 | { |
321 | return 1; | 299 | return 1; |
322 | } | 300 | } |
323 | 301 | ||
324 | static inline int | 302 | static inline int |
325 | mem_cgroup_inactive_file_is_low(struct lruvec *lruvec) | 303 | mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg) |
326 | { | 304 | { |
327 | return 1; | 305 | return 1; |
328 | } | 306 | } |
329 | 307 | ||
330 | static inline unsigned long | 308 | static inline unsigned long |
331 | mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) | 309 | mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid, |
310 | unsigned int lru_mask) | ||
332 | { | 311 | { |
333 | return 0; | 312 | return 0; |
334 | } | 313 | } |
335 | 314 | ||
336 | static inline void | ||
337 | mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, | ||
338 | int increment) | ||
339 | { | ||
340 | } | ||
341 | 315 | ||
342 | static inline void | 316 | static inline struct zone_reclaim_stat* |
343 | mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | 317 | mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) |
344 | { | 318 | { |
319 | return NULL; | ||
345 | } | 320 | } |
346 | 321 | ||
347 | static inline void mem_cgroup_begin_update_page_stat(struct page *page, | 322 | static inline struct zone_reclaim_stat* |
348 | bool *locked, unsigned long *flags) | 323 | mem_cgroup_get_reclaim_stat_from_page(struct page *page) |
349 | { | 324 | { |
325 | return NULL; | ||
350 | } | 326 | } |
351 | 327 | ||
352 | static inline void mem_cgroup_end_update_page_stat(struct page *page, | 328 | static inline void |
353 | bool *locked, unsigned long *flags) | 329 | mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) |
354 | { | 330 | { |
355 | } | 331 | } |
356 | 332 | ||
@@ -372,7 +348,14 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | |||
372 | return 0; | 348 | return 0; |
373 | } | 349 | } |
374 | 350 | ||
375 | static inline void mem_cgroup_split_huge_fixup(struct page *head) | 351 | static inline |
352 | u64 mem_cgroup_get_limit(struct mem_cgroup *mem) | ||
353 | { | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | static inline void mem_cgroup_split_huge_fixup(struct page *head, | ||
358 | struct page *tail) | ||
376 | { | 359 | { |
377 | } | 360 | } |
378 | 361 | ||
@@ -384,9 +367,9 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage, | |||
384 | struct page *newpage) | 367 | struct page *newpage) |
385 | { | 368 | { |
386 | } | 369 | } |
387 | #endif /* CONFIG_MEMCG */ | 370 | #endif /* CONFIG_CGROUP_MEM_CONT */ |
388 | 371 | ||
389 | #if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) | 372 | #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM) |
390 | static inline bool | 373 | static inline bool |
391 | mem_cgroup_bad_page_check(struct page *page) | 374 | mem_cgroup_bad_page_check(struct page *page) |
392 | { | 375 | { |
@@ -399,229 +382,5 @@ mem_cgroup_print_bad_page(struct page *page) | |||
399 | } | 382 | } |
400 | #endif | 383 | #endif |
401 | 384 | ||
402 | enum { | ||
403 | UNDER_LIMIT, | ||
404 | SOFT_LIMIT, | ||
405 | OVER_LIMIT, | ||
406 | }; | ||
407 | |||
408 | struct sock; | ||
409 | #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) | ||
410 | void sock_update_memcg(struct sock *sk); | ||
411 | void sock_release_memcg(struct sock *sk); | ||
412 | #else | ||
413 | static inline void sock_update_memcg(struct sock *sk) | ||
414 | { | ||
415 | } | ||
416 | static inline void sock_release_memcg(struct sock *sk) | ||
417 | { | ||
418 | } | ||
419 | #endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */ | ||
420 | |||
421 | #ifdef CONFIG_MEMCG_KMEM | ||
422 | extern struct static_key memcg_kmem_enabled_key; | ||
423 | |||
424 | extern int memcg_limited_groups_array_size; | ||
425 | |||
426 | /* | ||
427 | * Helper macro to loop through all memcg-specific caches. Callers must still | ||
428 | * check if the cache is valid (it is either valid or NULL). | ||
429 | * the slab_mutex must be held when looping through those caches | ||
430 | */ | ||
431 | #define for_each_memcg_cache_index(_idx) \ | ||
432 | for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) | ||
433 | |||
434 | static inline bool memcg_kmem_enabled(void) | ||
435 | { | ||
436 | return static_key_false(&memcg_kmem_enabled_key); | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * In general, we'll do everything in our power to not incur in any overhead | ||
441 | * for non-memcg users for the kmem functions. Not even a function call, if we | ||
442 | * can avoid it. | ||
443 | * | ||
444 | * Therefore, we'll inline all those functions so that in the best case, we'll | ||
445 | * see that kmemcg is off for everybody and proceed quickly. If it is on, | ||
446 | * we'll still do most of the flag checking inline. We check a lot of | ||
447 | * conditions, but because they are pretty simple, they are expected to be | ||
448 | * fast. | ||
449 | */ | ||
450 | bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, | ||
451 | int order); | ||
452 | void __memcg_kmem_commit_charge(struct page *page, | ||
453 | struct mem_cgroup *memcg, int order); | ||
454 | void __memcg_kmem_uncharge_pages(struct page *page, int order); | ||
455 | |||
456 | int memcg_cache_id(struct mem_cgroup *memcg); | ||
457 | int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | ||
458 | struct kmem_cache *root_cache); | ||
459 | void memcg_release_cache(struct kmem_cache *cachep); | ||
460 | void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); | ||
461 | |||
462 | int memcg_update_cache_size(struct kmem_cache *s, int num_groups); | ||
463 | void memcg_update_array_size(int num_groups); | ||
464 | |||
465 | struct kmem_cache * | ||
466 | __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); | ||
467 | |||
468 | void mem_cgroup_destroy_cache(struct kmem_cache *cachep); | ||
469 | void kmem_cache_destroy_memcg_children(struct kmem_cache *s); | ||
470 | |||
471 | /** | ||
472 | * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. | ||
473 | * @gfp: the gfp allocation flags. | ||
474 | * @memcg: a pointer to the memcg this was charged against. | ||
475 | * @order: allocation order. | ||
476 | * | ||
477 | * returns true if the memcg where the current task belongs can hold this | ||
478 | * allocation. | ||
479 | * | ||
480 | * We return true automatically if this allocation is not to be accounted to | ||
481 | * any memcg. | ||
482 | */ | ||
483 | static inline bool | ||
484 | memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) | ||
485 | { | ||
486 | if (!memcg_kmem_enabled()) | ||
487 | return true; | ||
488 | |||
489 | /* | ||
490 | * __GFP_NOFAIL allocations will move on even if charging is not | ||
491 | * possible. Therefore we don't even try, and have this allocation | ||
492 | * unaccounted. We could in theory charge it with | ||
493 | * res_counter_charge_nofail, but we hope those allocations are rare, | ||
494 | * and won't be worth the trouble. | ||
495 | */ | ||
496 | if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL)) | ||
497 | return true; | ||
498 | if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) | ||
499 | return true; | ||
500 | |||
501 | /* If the test is dying, just let it go. */ | ||
502 | if (unlikely(fatal_signal_pending(current))) | ||
503 | return true; | ||
504 | |||
505 | return __memcg_kmem_newpage_charge(gfp, memcg, order); | ||
506 | } | ||
507 | |||
508 | /** | ||
509 | * memcg_kmem_uncharge_pages: uncharge pages from memcg | ||
510 | * @page: pointer to struct page being freed | ||
511 | * @order: allocation order. | ||
512 | * | ||
513 | * there is no need to specify memcg here, since it is embedded in page_cgroup | ||
514 | */ | ||
515 | static inline void | ||
516 | memcg_kmem_uncharge_pages(struct page *page, int order) | ||
517 | { | ||
518 | if (memcg_kmem_enabled()) | ||
519 | __memcg_kmem_uncharge_pages(page, order); | ||
520 | } | ||
521 | |||
522 | /** | ||
523 | * memcg_kmem_commit_charge: embeds correct memcg in a page | ||
524 | * @page: pointer to struct page recently allocated | ||
525 | * @memcg: the memcg structure we charged against | ||
526 | * @order: allocation order. | ||
527 | * | ||
528 | * Needs to be called after memcg_kmem_newpage_charge, regardless of success or | ||
529 | * failure of the allocation. if @page is NULL, this function will revert the | ||
530 | * charges. Otherwise, it will commit the memcg given by @memcg to the | ||
531 | * corresponding page_cgroup. | ||
532 | */ | ||
533 | static inline void | ||
534 | memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) | ||
535 | { | ||
536 | if (memcg_kmem_enabled() && memcg) | ||
537 | __memcg_kmem_commit_charge(page, memcg, order); | ||
538 | } | ||
539 | |||
540 | /** | ||
541 | * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation | ||
542 | * @cachep: the original global kmem cache | ||
543 | * @gfp: allocation flags. | ||
544 | * | ||
545 | * This function assumes that the task allocating, which determines the memcg | ||
546 | * in the page allocator, belongs to the same cgroup throughout the whole | ||
547 | * process. Misacounting can happen if the task calls memcg_kmem_get_cache() | ||
548 | * while belonging to a cgroup, and later on changes. This is considered | ||
549 | * acceptable, and should only happen upon task migration. | ||
550 | * | ||
551 | * Before the cache is created by the memcg core, there is also a possible | ||
552 | * imbalance: the task belongs to a memcg, but the cache being allocated from | ||
553 | * is the global cache, since the child cache is not yet guaranteed to be | ||
554 | * ready. This case is also fine, since in this case the GFP_KMEMCG will not be | ||
555 | * passed and the page allocator will not attempt any cgroup accounting. | ||
556 | */ | ||
557 | static __always_inline struct kmem_cache * | ||
558 | memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | ||
559 | { | ||
560 | if (!memcg_kmem_enabled()) | ||
561 | return cachep; | ||
562 | if (gfp & __GFP_NOFAIL) | ||
563 | return cachep; | ||
564 | if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) | ||
565 | return cachep; | ||
566 | if (unlikely(fatal_signal_pending(current))) | ||
567 | return cachep; | ||
568 | |||
569 | return __memcg_kmem_get_cache(cachep, gfp); | ||
570 | } | ||
571 | #else | ||
572 | #define for_each_memcg_cache_index(_idx) \ | ||
573 | for (; NULL; ) | ||
574 | |||
575 | static inline bool memcg_kmem_enabled(void) | ||
576 | { | ||
577 | return false; | ||
578 | } | ||
579 | |||
580 | static inline bool | ||
581 | memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) | ||
582 | { | ||
583 | return true; | ||
584 | } | ||
585 | |||
586 | static inline void memcg_kmem_uncharge_pages(struct page *page, int order) | ||
587 | { | ||
588 | } | ||
589 | |||
590 | static inline void | ||
591 | memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) | ||
592 | { | ||
593 | } | ||
594 | |||
595 | static inline int memcg_cache_id(struct mem_cgroup *memcg) | ||
596 | { | ||
597 | return -1; | ||
598 | } | ||
599 | |||
600 | static inline int | ||
601 | memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | ||
602 | struct kmem_cache *root_cache) | ||
603 | { | ||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | static inline void memcg_release_cache(struct kmem_cache *cachep) | ||
608 | { | ||
609 | } | ||
610 | |||
611 | static inline void memcg_cache_list_add(struct mem_cgroup *memcg, | ||
612 | struct kmem_cache *s) | ||
613 | { | ||
614 | } | ||
615 | |||
616 | static inline struct kmem_cache * | ||
617 | memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) | ||
618 | { | ||
619 | return cachep; | ||
620 | } | ||
621 | |||
622 | static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s) | ||
623 | { | ||
624 | } | ||
625 | #endif /* CONFIG_MEMCG_KMEM */ | ||
626 | #endif /* _LINUX_MEMCONTROL_H */ | 385 | #endif /* _LINUX_MEMCONTROL_H */ |
627 | 386 | ||
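Both sides of this diff keep the same overall shape: real declarations under the memcg config option (`CONFIG_CGROUP_MEM_RES_CTLR` on the right-hand side, renamed `CONFIG_MEMCG` on the left) and empty `static inline` stubs under `#else`, so callers compile unchanged when the controller is disabled. As a rough standalone illustration of that stub pattern only, the sketch below uses an invented `MEMCG_ENABLED` macro, an invented `charge_page()` helper, and a stand-in `struct page`; it is not kernel code and not part of this commit.

```c
/*
 * Illustrative stand-in for the #ifdef/#else stub pattern used throughout
 * include/linux/memcontrol.h. Build with or without -DMEMCG_ENABLED; the
 * caller in main() is identical either way.
 */
#include <stdio.h>

struct page { int id; };	/* stand-in, not the kernel's struct page */

#ifdef MEMCG_ENABLED
/* "Controller enabled": the helper does real work and may fail. */
static inline int charge_page(struct page *page)
{
	printf("charging page %d to a control group\n", page->id);
	return 0;
}
#else
/* "Controller compiled out": a no-op stub keeps callers free of #ifdefs. */
static inline int charge_page(struct page *page)
{
	(void)page;
	return 0;	/* nothing to account, always succeeds */
}
#endif

int main(void)
{
	struct page p = { .id = 42 };

	if (charge_page(&p) != 0)
		fprintf(stderr, "charge failed\n");
	return 0;
}
```

Most of the hunks above are the two halves of that pattern being kept in sync: when a declaration moves in or out of the `#ifdef` block, a matching stub moves in or out of the `#else` block further down.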