Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--	include/linux/pagemap.h	91
1 files changed, 65 insertions(+), 26 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a81d81890422..5da31c12101c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -20,6 +20,7 @@
  */
 #define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
 #define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
+#define AS_MM_ALL_LOCKS	(__GFP_BITS_SHIFT + 2)	/* under mm_take_all_locks() */
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
 {
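For context, a minimal sketch of how an address_space flag bit such as the new AS_MM_ALL_LOCKS is typically driven; the wrapper name below is hypothetical and not part of this patch, it only shows that mapping->flags uses the ordinary bitops, just like the AS_EIO/AS_ENOSPC bits above.

	/* Hypothetical helper, not from this patch: mark a mapping as held by
	 * mm_take_all_locks() and return whether it was already marked. */
	static inline int mapping_test_and_set_all_locks(struct address_space *mapping)
	{
		return test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags);
	}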
@@ -142,6 +143,29 @@ static inline int page_cache_get_speculative(struct page *page)
 	return 1;
 }
 
+/*
+ * Same as above, but add instead of inc (could just be merged)
+ */
+static inline int page_cache_add_speculative(struct page *page, int count)
+{
+	VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+	VM_BUG_ON(!in_atomic());
+# endif
+	VM_BUG_ON(page_count(page) == 0);
+	atomic_add(count, &page->_count);
+
+#else
+	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+		return 0;
+#endif
+	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+
+	return 1;
+}
+
 static inline int page_freeze_refs(struct page *page, int count)
 {
 	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
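As a hedged illustration of the new helper (the caller below is hypothetical, not from this patch): page_cache_add_speculative() lets a lockless lookup take several references in one go and tells the caller to back off when the page is already on its way to being freed.

	/* Hypothetical caller: take 'nr' speculative references at once. */
	static struct page *grab_page_refs(struct page *page, int nr)
	{
		if (!page_cache_add_speculative(page, nr))
			return NULL;	/* refcount was 0: page is being freed */
		/* success: caller now owns 'nr' extra references on the page */
		return page;
	}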
@@ -226,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		ClearPageLocked(page);
-	return error;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
@@ -270,13 +271,28 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
+static inline void set_page_locked(struct page *page)
+{
+	set_bit(PG_locked, &page->flags);
+}
+
+static inline void clear_page_locked(struct page *page)
+{
+	clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+	return !test_and_set_bit(PG_locked, &page->flags);
+}
+
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page(page);
 }
 
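A brief sketch of the non-blocking pattern the new trylock_page() helper serves; the function below is illustrative only and not part of the patch. Callers that must not sleep try the lock and bail out instead of calling lock_page().

	/* Illustrative only: try to lock a page without sleeping. */
	static int try_process_page(struct page *page)
	{
		if (!trylock_page(page))
			return -EAGAIN;	/* PG_locked already held elsewhere */
		/* ... work on the locked page ... */
		unlock_page(page);
		return 0;
	}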
@@ -288,7 +304,7 @@ static inline void lock_page(struct page *page)
 static inline int lock_page_killable(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		return __lock_page_killable(page);
 	return 0;
 }
@@ -300,7 +316,7 @@ static inline int lock_page_killable(struct page *page)
 static inline void lock_page_nosync(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page_nosync(page);
 }
 
@@ -385,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 	return ret;
 }
 
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	set_page_locked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		clear_page_locked(page);
+	return error;
+}
+
 #endif /* _LINUX_PAGEMAP_H */
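Finally, a hedged sketch of a typical user of the relocated add_to_page_cache(); alloc_and_insert() and its error handling are illustrative assumptions, not code from this change. The idea is to allocate a fresh page, insert it locked, and drop the reference if insertion fails.

	/* Hypothetical caller: allocate a page and insert it into a mapping. */
	static struct page *alloc_and_insert(struct address_space *mapping,
					     pgoff_t index, gfp_t gfp)
	{
		struct page *page = alloc_page(gfp);

		if (!page)
			return NULL;
		if (add_to_page_cache(page, mapping, index, gfp)) {
			page_cache_release(page);	/* e.g. -EEXIST or -ENOMEM */
			return NULL;
		}
		return page;	/* locked and in the page cache */
	}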