path: root/include/linux/page_ref.h
author     Joonsoo Kim <iamjoonsoo.kim@lge.com>          2016-05-19 20:10:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2016-05-19 22:12:14 -0400
commit     0139aa7b7fa12ceef095d99dc36606a5b10ab83a (patch)
tree       94da74f2f79911a11a3c7c34f73ba971dec41a7e /include/linux/page_ref.h
parent     6d061f9f6136d477932088c24ce155d7dc785746 (diff)
mm: rename _count, field of the struct page, to _refcount
Many developers already know that the field holding the reference count of struct page is _count and that it has an atomic type. They may try to handle it directly, and this could defeat the purpose of the page reference count tracepoints. To prevent direct modification of _count, this patch renames it to _refcount and adds a warning message to the code. After that, developers who need to handle the reference count will find that the field should not be accessed directly.

[akpm@linux-foundation.org: fix comments, per Vlastimil]
[akpm@linux-foundation.org: Documentation/vm/transhuge.txt too]
[sfr@canb.auug.org.au: sync ethernet driver changes]

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Sunil Goutham <sgoutham@cavium.com>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Manish Chopra <manish.chopra@qlogic.com>
Cc: Yuval Mintz <yuval.mintz@qlogic.com>
Cc: Tariq Toukan <tariqt@mellanox.com>
Cc: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
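To illustrate the intent of the rename, here is a minimal sketch (not part of the patch): code that used to open-code atomic operations on page->_count should instead go through the page_ref_* helpers declared in this header, so the page_ref tracepoints stay accurate. The helper names driver_hold_page() and driver_page_is_shared() below are hypothetical.

    #include <linux/page_ref.h>

    /* Hypothetical driver helpers, shown only to illustrate the intended usage. */
    static void driver_hold_page(struct page *page)
    {
            /* was: atomic_inc(&page->_count); now routed through the wrapper */
            page_ref_inc(page);
    }

    static int driver_page_is_shared(struct page *page)
    {
            /* was: atomic_read(&page->_count) > 1 */
            return page_ref_count(page) > 1;
    }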
Diffstat (limited to 'include/linux/page_ref.h')
-rw-r--r--   include/linux/page_ref.h   26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index e596d5d9540e..8b5e0a9f2431 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -63,17 +63,17 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
 
 static inline int page_ref_count(struct page *page)
 {
-        return atomic_read(&page->_count);
+        return atomic_read(&page->_refcount);
 }
 
 static inline int page_count(struct page *page)
 {
-        return atomic_read(&compound_head(page)->_count);
+        return atomic_read(&compound_head(page)->_refcount);
 }
 
 static inline void set_page_count(struct page *page, int v)
 {
-        atomic_set(&page->_count, v);
+        atomic_set(&page->_refcount, v);
         if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
                 __page_ref_set(page, v);
 }
@@ -89,35 +89,35 @@ static inline void init_page_count(struct page *page)
 
 static inline void page_ref_add(struct page *page, int nr)
 {
-        atomic_add(nr, &page->_count);
+        atomic_add(nr, &page->_refcount);
         if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
                 __page_ref_mod(page, nr);
 }
 
 static inline void page_ref_sub(struct page *page, int nr)
 {
-        atomic_sub(nr, &page->_count);
+        atomic_sub(nr, &page->_refcount);
         if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
                 __page_ref_mod(page, -nr);
 }
 
 static inline void page_ref_inc(struct page *page)
 {
-        atomic_inc(&page->_count);
+        atomic_inc(&page->_refcount);
         if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
                 __page_ref_mod(page, 1);
 }
 
 static inline void page_ref_dec(struct page *page)
 {
-        atomic_dec(&page->_count);
+        atomic_dec(&page->_refcount);
         if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
                 __page_ref_mod(page, -1);
 }
 
 static inline int page_ref_sub_and_test(struct page *page, int nr)
 {
-        int ret = atomic_sub_and_test(nr, &page->_count);
+        int ret = atomic_sub_and_test(nr, &page->_refcount);
 
         if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
                 __page_ref_mod_and_test(page, -nr, ret);
@@ -126,7 +126,7 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
 
 static inline int page_ref_dec_and_test(struct page *page)
 {
-        int ret = atomic_dec_and_test(&page->_count);
+        int ret = atomic_dec_and_test(&page->_refcount);
 
         if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
                 __page_ref_mod_and_test(page, -1, ret);
@@ -135,7 +135,7 @@ static inline int page_ref_dec_and_test(struct page *page)
 
 static inline int page_ref_dec_return(struct page *page)
 {
-        int ret = atomic_dec_return(&page->_count);
+        int ret = atomic_dec_return(&page->_refcount);
 
         if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
                 __page_ref_mod_and_return(page, -1, ret);
@@ -144,7 +144,7 @@ static inline int page_ref_dec_return(struct page *page)
 
 static inline int page_ref_add_unless(struct page *page, int nr, int u)
 {
-        int ret = atomic_add_unless(&page->_count, nr, u);
+        int ret = atomic_add_unless(&page->_refcount, nr, u);
 
         if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
                 __page_ref_mod_unless(page, nr, ret);
@@ -153,7 +153,7 @@ static inline int page_ref_add_unless(struct page *page, int nr, int u)
 
 static inline int page_ref_freeze(struct page *page, int count)
 {
-        int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+        int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
 
         if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
                 __page_ref_freeze(page, count, ret);
@@ -165,7 +165,7 @@ static inline void page_ref_unfreeze(struct page *page, int count)
         VM_BUG_ON_PAGE(page_count(page) != 0, page);
         VM_BUG_ON(count == 0);
 
-        atomic_set(&page->_count, count);
+        atomic_set(&page->_refcount, count);
         if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
                 __page_ref_unfreeze(page, count);
 }
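As a further illustration of the helpers touched in the last two hunks, here is a hedged usage sketch (not part of this patch): page_ref_freeze() succeeds only when the refcount equals the expected value, atomically dropping it to zero so no new references can be taken, and page_ref_unfreeze() restores it afterwards. The function name do_exclusive_page_op() is hypothetical.

    #include <linux/errno.h>
    #include <linux/page_ref.h>

    /* Hypothetical example of the freeze/unfreeze pattern. */
    static int do_exclusive_page_op(struct page *page, int expected_count)
    {
            /* Fails (returns 0) if someone else holds extra references. */
            if (!page_ref_freeze(page, expected_count))
                    return -EAGAIN;

            /* ... operate on the page while its refcount is frozen at zero ... */

            /* Restore the count; fires the page_ref_unfreeze tracepoint if enabled. */
            page_ref_unfreeze(page, expected_count);
            return 0;
    }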