 include/linux/pagemap.h | 14 +++++++-------
 mm/swap_state.c         |  4 ++--
 2 files changed, 9 insertions, 9 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b6c4d8d26b8..7334b2b6c4c6 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -299,14 +299,14 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
 {
-	set_bit(PG_locked, &page->flags);
+	__set_bit(PG_locked, &page->flags);
 }
 
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
 {
-	clear_bit(PG_locked, &page->flags);
+	__clear_bit(PG_locked, &page->flags);
 }
 
 static inline int trylock_page(struct page *page)
@@ -438,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
 		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
 	int error;
 
-	set_page_locked(page);
+	__set_page_locked(page);
 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
 	if (unlikely(error))
-		clear_page_locked(page);
+		__clear_page_locked(page);
 	return error;
 }
 
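
The comment above add_to_page_cache() captures why the double-underscore variants are sufficient: a newly allocated page is not yet reachable through the page cache or any other shared structure, so nothing else can touch page->flags concurrently and the cheaper non-atomic __set_bit()/__clear_bit() are safe. Below is a minimal caller sketch, not part of this patch: example_grab_new_page() is a hypothetical name, while page_cache_alloc_cold()/page_cache_release() are the contemporaneous pagemap.h helpers and add_to_page_cache()/unlock_page() are the functions shown above.

#include <linux/pagemap.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper, for illustration: allocate a fresh page and insert
 * it at @index.  The page is private to this context until
 * add_to_page_cache() publishes it, which is what makes the non-atomic
 * __set_page_locked() inside add_to_page_cache() safe.  On success the
 * page is returned locked; the caller unlocks it with unlock_page() once
 * it has been filled.
 */
static struct page *example_grab_new_page(struct address_space *mapping,
					  pgoff_t index)
{
	struct page *page = page_cache_alloc_cold(mapping);

	if (!page)
		return NULL;

	if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
		/* __clear_page_locked() already ran on the error path */
		page_cache_release(page);
		return NULL;
	}
	return page;
}
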
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 43cda7b4b808..3353c9029cef 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -303,7 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		 * re-using the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
-		set_page_locked(new_page);
+		__set_page_locked(new_page);
 		SetPageSwapBacked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
 		if (likely(!err)) {
@@ -315,7 +315,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			return new_page;
 		}
 		ClearPageSwapBacked(new_page);
-		clear_page_locked(new_page);
+		__clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);
 
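
The same reasoning holds in read_swap_cache_async(): new_page is freshly allocated earlier in the retry loop and is not yet visible in the swap cache, so both the set and the error-path clear can use the non-atomic flag operations. What the patch saves is the locked read-modify-write that set_bit()/clear_bit() imply on SMP. The sketch below is a rough userspace analogy, not kernel code: GCC/Clang __atomic builtins stand in for the kernel's atomic bitops, and the names are hypothetical.

#include <stdio.h>

#define PG_locked 0	/* stand-in for the real page flag bit */

/* Non-atomic form: roughly what __set_bit()/__clear_bit() boil down to. */
static void nonatomic_set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;	/* plain load, OR, store */
}

/* Atomic form: roughly what set_bit()/clear_bit() guarantee on SMP
 * (an atomic RMW, without implying a full memory barrier). */
static void atomic_set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

int main(void)
{
	unsigned long flags = 0;	/* stand-in for page->flags */

	/* Safe while 'flags' is private, e.g. a page not yet published. */
	nonatomic_set_bit(PG_locked, &flags);
	printf("after non-atomic set: %#lx\n", flags);

	/* Required once other CPUs/threads may update 'flags' concurrently. */
	atomic_set_bit(PG_locked, &flags);
	printf("after atomic set:     %#lx\n", flags);
	return 0;
}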