Diffstat (limited to 'include/linux/pagemap.h')
 include/linux/pagemap.h | 90 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 64 insertions(+), 26 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a39b38ccdc97..5da31c12101c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -143,6 +143,29 @@ static inline int page_cache_get_speculative(struct page *page)
 	return 1;
 }
 
+/*
+ * Same as above, but add instead of inc (could just be merged)
+ */
+static inline int page_cache_add_speculative(struct page *page, int count)
+{
+	VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+	VM_BUG_ON(!in_atomic());
+# endif
+	VM_BUG_ON(page_count(page) == 0);
+	atomic_add(count, &page->_count);
+
+#else
+	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+		return 0;
+#endif
+	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+
+	return 1;
+}
+
 static inline int page_freeze_refs(struct page *page, int count)
 {
 	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
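
The new page_cache_add_speculative() takes @count references in one shot, failing (returning 0) on SMP/preemptible-RCU configurations if the page's refcount has already dropped to zero, i.e. the page is on its way to being freed. As a rough illustration of the intended usage pattern, here is a hypothetical caller, not part of this patch; the recheck that the page is still at the same index after the reference is taken is noted but omitted for brevity:

/*
 * Hypothetical caller: look a page up locklessly under RCU and take
 * @refs references on it with one atomic_add_unless(), instead of
 * @refs separate page_cache_get_speculative() calls.
 */
static struct page *gang_grab_page(struct address_space *mapping,
				   pgoff_t index, int refs)
{
	struct page *page;

	rcu_read_lock();
	page = radix_tree_lookup(&mapping->page_tree, index);
	if (page && !page_cache_add_speculative(page, refs))
		page = NULL;	/* refcount was 0: page is being freed */
	rcu_read_unlock();
	/* A real caller must recheck page->mapping and page->index here. */
	return page;
}
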
@@ -227,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		ClearPageLocked(page);
-	return error;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
@@ -271,13 +271,28 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
+static inline void set_page_locked(struct page *page)
+{
+	set_bit(PG_locked, &page->flags);
+}
+
+static inline void clear_page_locked(struct page *page)
+{
+	clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+	return !test_and_set_bit(PG_locked, &page->flags);
+}
+
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page(page);
 }
 
@@ -289,7 +304,7 @@ static inline void lock_page(struct page *page)
 static inline int lock_page_killable(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		return __lock_page_killable(page);
 	return 0;
 }
@@ -301,7 +316,7 @@ static inline int lock_page_killable(struct page *page)
 static inline void lock_page_nosync(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page_nosync(page);
 }
 
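With these three hunks, all the lock_page variants share the new trylock_page() helper in place of open-coded TestSetPageLocked() calls. The helper also reads naturally on its own wherever a caller would rather skip a page than sleep on its lock; a minimal sketch under that assumption (hypothetical function, not from this patch):

/*
 * Hypothetical non-blocking writeback step: give up immediately if
 * someone else holds PG_locked rather than sleeping in lock_page().
 */
static int try_write_one_page(struct page *page, struct writeback_control *wbc)
{
	if (!trylock_page(page))
		return -EAGAIN;		/* page locked by somebody else */
	/* ... set up and submit the write here ... */
	unlock_page(page);
	return 0;
}
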
@@ -386,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 	return ret;
 }
 
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	set_page_locked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		clear_page_locked(page);
+	return error;
+}
+
 #endif /* _LINUX_PAGEMAP_H */
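
The final hunk restores the add_to_page_cache() block that the second hunk removed, now placed below the set_page_locked()/clear_page_locked() definitions it depends on, with SetPageLocked()/ClearPageLocked() swapped for those helpers. For context, a sketch of a typical caller allocating a fresh page for insertion; the helper itself is hypothetical, while page_cache_alloc_cold() and page_cache_release() are this header's existing API:

/*
 * Hypothetical caller: allocate a new page and insert it into the
 * page cache and LRU.  On success the page is returned locked, as
 * ->readpage() callers expect; on failure the reference is dropped.
 */
static struct page *alloc_and_add_page(struct address_space *mapping,
				       pgoff_t index, gfp_t gfp)
{
	struct page *page = page_cache_alloc_cold(mapping);

	if (!page)
		return NULL;
	if (add_to_page_cache_lru(page, mapping, index, gfp)) {
		page_cache_release(page);	/* raced with another insert, or ENOMEM */
		return NULL;
	}
	return page;	/* PG_locked held; unlock after I/O completes */
}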
