summaryrefslogtreecommitdiffstats
path: root/include/linux/pagemap.h
diff options
context:
space:
mode:
authorjohn.hubbard@gmail.com <john.hubbard@gmail.com>2019-03-05 18:48:49 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2019-03-06 00:07:20 -0500
commit494eec70f054965e2e699db450cde2c08db1c008 (patch)
treee413ad49a7d913e30377c47ddc49813a58537884 /include/linux/pagemap.h
parentf900482da560941f978b0d36660e96f48ea78752 (diff)
mm: page_cache_add_speculative(): refactor out some code duplication
From: John Hubbard <jhubbard@nvidia.com> This combines the common elements of these routines: page_cache_get_speculative() page_cache_add_speculative() This was anticipated by the original author, as shown by the comment in commit ce0ad7f095258 ("powerpc/mm: Lockless get_user_pages_fast() for 64-bit (v3)"): "Same as above, but add instead of inc (could just be merged)" There is no intention to introduce any behavioral change, but there is a small risk of that, due to slightly differing ways of expressing the TINY_RCU and related configurations. This also removes the VM_BUG_ON(in_interrupt()) that was in page_cache_add_speculative(), but not in page_cache_get_speculative(). This provides slightly less detection of such bugs, but given that it was only there on the "add" path anyway, we can likely do without it just fine. And it removes the VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); that page_cache_add_speculative() had. Link: http://lkml.kernel.org/r/20190206231016.22734-2-jhubbard@nvidia.com Signed-off-by: John Hubbard <jhubbard@nvidia.com> Reviewed-by: Andrew Morton <akpm@linux-foundation.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Dave Kleikamp <shaggy@linux.vnet.ibm.com> Cc: Hugh Dickins <hughd@google.com> Cc: Jeff Layton <jlayton@kernel.org> Cc: Matthew Wilcox <willy@infradead.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--include/linux/pagemap.h31
1 file changed, 9 insertions, 22 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e2d7039af6a3..b477a70cc2e4 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -164,7 +164,7 @@ void release_pages(struct page **pages, int nr);
164 * will find the page or it will not. Likewise, the old find_get_page could run 164 * will find the page or it will not. Likewise, the old find_get_page could run
165 * either before the insertion or afterwards, depending on timing. 165 * either before the insertion or afterwards, depending on timing.
166 */ 166 */
167static inline int page_cache_get_speculative(struct page *page) 167static inline int __page_cache_add_speculative(struct page *page, int count)
168{ 168{
169#ifdef CONFIG_TINY_RCU 169#ifdef CONFIG_TINY_RCU
170# ifdef CONFIG_PREEMPT_COUNT 170# ifdef CONFIG_PREEMPT_COUNT
@@ -180,10 +180,10 @@ static inline int page_cache_get_speculative(struct page *page)
180 * SMP requires. 180 * SMP requires.
181 */ 181 */
182 VM_BUG_ON_PAGE(page_count(page) == 0, page); 182 VM_BUG_ON_PAGE(page_count(page) == 0, page);
183 page_ref_inc(page); 183 page_ref_add(page, count);
184 184
185#else 185#else
186 if (unlikely(!get_page_unless_zero(page))) { 186 if (unlikely(!page_ref_add_unless(page, count, 0))) {
187 /* 187 /*
188 * Either the page has been freed, or will be freed. 188 * Either the page has been freed, or will be freed.
189 * In either case, retry here and the caller should 189 * In either case, retry here and the caller should
@@ -197,27 +197,14 @@ static inline int page_cache_get_speculative(struct page *page)
197 return 1; 197 return 1;
198} 198}
199 199
200/* 200static inline int page_cache_get_speculative(struct page *page)
201 * Same as above, but add instead of inc (could just be merged)
202 */
203static inline int page_cache_add_speculative(struct page *page, int count)
204{ 201{
205 VM_BUG_ON(in_interrupt()); 202 return __page_cache_add_speculative(page, 1);
206 203}
207#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
208# ifdef CONFIG_PREEMPT_COUNT
209 VM_BUG_ON(!in_atomic() && !irqs_disabled());
210# endif
211 VM_BUG_ON_PAGE(page_count(page) == 0, page);
212 page_ref_add(page, count);
213
214#else
215 if (unlikely(!page_ref_add_unless(page, count, 0)))
216 return 0;
217#endif
218 VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
219 204
220 return 1; 205static inline int page_cache_add_speculative(struct page *page, int count)
206{
207 return __page_cache_add_speculative(page, count);
221} 208}
222 209
223#ifdef CONFIG_NUMA 210#ifdef CONFIG_NUMA