about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2008-10-18 23:26:57 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-10-20 11:52:31 -0400
commitf45840b5c128445da70e7ec33adc47b4a12bdaf4 (patch)
tree3815a03d12d1c69d71a48c44cd216c3e1e84272a /include
parent9978ad583e100945b74e4f33e73317983ea32df9 (diff)
mm: pagecache insertion fewer atomics
Setting and clearing the page locked when inserting it into swapcache / pagecache when it has no other references can use non-atomic page flags operations because no other CPU may be operating on it at this time. This saves one atomic operation when inserting a page into pagecache.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/pagemap.h14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b6c4d8d26b8..7334b2b6c4c6 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -299,14 +299,14 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
 {
-	set_bit(PG_locked, &page->flags);
+	__set_bit(PG_locked, &page->flags);
 }
 
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
 {
-	clear_bit(PG_locked, &page->flags);
+	__clear_bit(PG_locked, &page->flags);
 }
 
 static inline int trylock_page(struct page *page)
@@ -438,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
 		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
 	int error;
 
-	set_page_locked(page);
+	__set_page_locked(page);
 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
 	if (unlikely(error))
-		clear_page_locked(page);
+		__clear_page_locked(page);
 	return error;
 }
 