Diffstat (limited to 'include/linux/page_cgroup.h')
-rw-r--r--	include/linux/page_cgroup.h	38
1 file changed, 33 insertions, 5 deletions
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 5bb13b3db84d..6d6cb7a57bb3 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -35,12 +35,15 @@ struct page_cgroup *lookup_page_cgroup(struct page *page);
 
 enum {
 	/* flags for mem_cgroup */
-	PCG_LOCK,  /* page cgroup is locked */
+	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
 	PCG_CACHE, /* charged as cache */
 	PCG_USED, /* this object is in use. */
-	PCG_ACCT_LRU, /* page has been accounted for */
-	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
 	PCG_MIGRATION, /* under page migration */
+	/* flags for mem_cgroup and file and I/O status */
+	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
+	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
+	/* No lock in page_cgroup */
+	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
 };
 
 #define TESTPCGFLAG(uname, lname)			\
@@ -59,8 +62,6 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
 static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
 	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
 
-TESTPCGFLAG(Locked, LOCK)
-
 /* Cache flag is set only once (at allocation) */
 TESTPCGFLAG(Cache, CACHE)
 CLEARPCGFLAG(Cache, CACHE)
@@ -96,6 +97,10 @@ static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
 
 static inline void lock_page_cgroup(struct page_cgroup *pc)
 {
+	/*
+	 * Don't take this lock in IRQ context.
+	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
+	 */
 	bit_spin_lock(PCG_LOCK, &pc->flags);
 }
 
@@ -104,6 +109,29 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc)
 	bit_spin_unlock(PCG_LOCK, &pc->flags);
 }
 
+static inline int page_is_cgroup_locked(struct page_cgroup *pc)
+{
+	return bit_spin_is_locked(PCG_LOCK, &pc->flags);
+}
+
+static inline void move_lock_page_cgroup(struct page_cgroup *pc,
+	unsigned long *flags)
+{
+	/*
+	 * We know updates to pc->flags of page cache's stats are from both of
+	 * usual context or IRQ context. Disable IRQ to avoid deadlock.
+	 */
+	local_irq_save(*flags);
+	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
+}
+
+static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
+	unsigned long *flags)
+{
+	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
+	local_irq_restore(*flags);
+}
+
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct page_cgroup;
 
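
For context, a minimal usage sketch of the locking split this patch introduces: PCG_LOCK still guards pc->mem_cgroup together with the charge-state bits (USED, CACHE, MIGRATION) and must not be taken from IRQ context, while the new PCG_MOVE_LOCK is taken with IRQs disabled around per-page statistics updates so they cannot race with move_account(). The sketch below is not part of the patch; the function name example_account_file_mapped and the stat-update placeholder are invented for illustration, and the real callers (in mm/memcontrol.c) are not shown in this header diff. It assumes the existing lookup_page_cgroup() and PageCgroupUsed() helpers from this header.

/* Hypothetical caller, for illustration only. */
#include <linux/mm.h>
#include <linux/page_cgroup.h>

static void example_account_file_mapped(struct page *page, int val)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	unsigned long flags;

	if (unlikely(!pc))
		return;

	/*
	 * PCG_MOVE_LOCK is taken with IRQs disabled, so an IRQ-context
	 * update of the same page's stats cannot deadlock against us,
	 * and move_account() cannot re-assign pc->mem_cgroup while the
	 * flag/counter pair is being updated.
	 */
	move_lock_page_cgroup(pc, &flags);
	if (PageCgroupUsed(pc)) {
		/* ... adjust the owning memcg's FILE_MAPPED counter by val ... */
	}
	move_unlock_page_cgroup(pc, &flags);
}

By contrast, lock_page_cgroup()/unlock_page_cgroup() (PCG_LOCK) would wrap the charge/uncharge and migration paths only, which is why this patch can drop the IRQ-unsafe TESTPCGFLAG(Locked, LOCK) accessor in favour of the explicit page_is_cgroup_locked() helper.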