author     Nick Piggin <npiggin@suse.de>                    2008-08-02 06:01:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-08-05 00:31:34 -0400
commit     529ae9aaa08378cfe2a4350bded76f32cc8ff0ce (patch)
tree       d3ae998f9876c72a83a022805103a92111852b21 /include/linux
parent     e9ba9698187ddbc0c5bfcf41de0349a662d23d02 (diff)
mm: rename page trylock
Converting page lock to new locking bitops requires a change of page flag
operation naming, so we might as well convert it to something nicer
(!TestSetPageLocked => trylock_page, SetPageLocked => set_page_locked).
This also facilitates lockdep annotation of the page lock.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
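
For readers unfamiliar with the old macros, the sketch below shows how a caller's locking sequence reads before and after this rename. It is illustrative only and not part of the patch: try_lock_and_touch() is a hypothetical caller, and it assumes <linux/pagemap.h> and <linux/errno.h>. Note that the sense of the test is inverted: TestSetPageLocked() returned nonzero when the page was already locked, while trylock_page() returns nonzero when the lock was acquired.

/* Hypothetical caller, for illustration only. */
static int try_lock_and_touch(struct page *page)
{
	/*
	 * Old naming (removed by this patch):
	 *	if (TestSetPageLocked(page))
	 *		return -EBUSY;
	 */

	/* New naming: trylock_page() returns nonzero when it takes the lock. */
	if (!trylock_page(page))
		return -EBUSY;

	/* ... work on the locked page ... */

	unlock_page(page);
	return 0;
}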
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/page-flags.h |  2
-rw-r--r--   include/linux/pagemap.h    | 67
2 files changed, 42 insertions(+), 27 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 25aaccdb2f26..c74d3e875314 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page)	\
 
 struct page;	/* forward declaration */
 
-PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
+TESTPAGEFLAG(Locked, locked)
 PAGEFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 69ed3cb1197a..5da31c12101c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -250,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		ClearPageLocked(page);
-	return error;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
@@ -294,13 +271,28 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
+static inline void set_page_locked(struct page *page)
+{
+	set_bit(PG_locked, &page->flags);
+}
+
+static inline void clear_page_locked(struct page *page)
+{
+	clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+	return !test_and_set_bit(PG_locked, &page->flags);
+}
+
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page(page);
 }
 
@@ -312,7 +304,7 @@ static inline void lock_page(struct page *page)
 static inline int lock_page_killable(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		return __lock_page_killable(page);
 	return 0;
 }
@@ -324,7 +316,7 @@ static inline int lock_page_killable(struct page *page)
 static inline void lock_page_nosync(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page_nosync(page);
 }
 
@@ -409,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 	return ret;
 }
 
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	set_page_locked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		clear_page_locked(page);
+	return error;
+}
+
 #endif /* _LINUX_PAGEMAP_H */
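
As a usage note, the relocated add_to_page_cache() still returns the page locked on success, because set_page_locked() is applied before insertion. The fragment below is a hypothetical illustration of that pairing, not part of the patch: grab_new_cache_page() is an invented name, error handling is simplified, and it assumes <linux/pagemap.h> and <linux/gfp.h>.

/* Hypothetical helper, for illustration only. */
static struct page *grab_new_cache_page(struct address_space *mapping,
					pgoff_t index)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;

	/*
	 * add_to_page_cache() sets PG_locked via set_page_locked() before
	 * inserting, so on success the page comes back locked.
	 */
	if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
		__free_page(page);
		return NULL;
	}

	/*
	 * Caller populates the page (e.g. by reading from disk), then calls
	 * unlock_page() to drop the lock taken above.
	 */
	return page;
}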