aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/pagemap.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--include/linux/pagemap.h107
1 files changed, 101 insertions, 6 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c16fb6d06e36..0a97b583ee8d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -259,12 +259,109 @@ pgoff_t page_cache_next_hole(struct address_space *mapping,
259pgoff_t page_cache_prev_hole(struct address_space *mapping, 259pgoff_t page_cache_prev_hole(struct address_space *mapping,
260 pgoff_t index, unsigned long max_scan); 260 pgoff_t index, unsigned long max_scan);
261 261
262#define FGP_ACCESSED 0x00000001
263#define FGP_LOCK 0x00000002
264#define FGP_CREAT 0x00000004
265#define FGP_WRITE 0x00000008
266#define FGP_NOFS 0x00000010
267#define FGP_NOWAIT 0x00000020
268
269struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
270 int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
271
272/**
273 * find_get_page - find and get a page reference
274 * @mapping: the address_space to search
275 * @offset: the page index
276 *
277 * Looks up the page cache slot at @mapping & @offset. If there is a
278 * page cache page, it is returned with an increased refcount.
279 *
280 * Otherwise, %NULL is returned.
281 */
282static inline struct page *find_get_page(struct address_space *mapping,
283 pgoff_t offset)
284{
285 return pagecache_get_page(mapping, offset, 0, 0, 0);
286}
287
288static inline struct page *find_get_page_flags(struct address_space *mapping,
289 pgoff_t offset, int fgp_flags)
290{
291 return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
292}
293
294/**
295 * find_lock_page - locate, pin and lock a pagecache page
296 *
297 * @mapping: the address_space to search
298 * @offset: the page index
299 *
300 * Looks up the page cache slot at @mapping & @offset. If there is a
301 * page cache page, it is returned locked and with an increased
302 * refcount.
303 *
304 * Otherwise, %NULL is returned.
305 *
306 * find_lock_page() may sleep.
307 */
308static inline struct page *find_lock_page(struct address_space *mapping,
309 pgoff_t offset)
310{
311 return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
312}
313
314/**
315 * find_or_create_page - locate or add a pagecache page
316 * @mapping: the page's address_space
317 * @index: the page's index into the mapping
318 * @gfp_mask: page allocation mode
319 *
320 * Looks up the page cache slot at @mapping & @index. If there is a
321 * page cache page, it is returned locked and with an increased
322 * refcount.
323 *
324 * If the page is not present, a new page is allocated using @gfp_mask
325 * and added to the page cache and the VM's LRU list. The page is
326 * returned locked and with an increased refcount.
327 *
328 * On memory exhaustion, %NULL is returned.
329 *
330 * find_or_create_page() may sleep, even if @gfp_mask specifies an
331 * atomic allocation!
332 */
333static inline struct page *find_or_create_page(struct address_space *mapping,
334 pgoff_t offset, gfp_t gfp_mask)
335{
336 return pagecache_get_page(mapping, offset,
337 FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
338 gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
339}
340
341/**
342 * grab_cache_page_nowait - returns locked page at given index in given cache
343 * @mapping: target address_space
344 * @index: the page index
345 *
346 * Same as grab_cache_page(), but do not wait if the page is unavailable.
347 * This is intended for speculative data generators, where the data can
348 * be regenerated if the page couldn't be grabbed. This routine should
349 * be safe to call while holding the lock for another page.
350 *
351 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
352 * and deadlock against the caller's locked page.
353 */
354static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
355 pgoff_t index)
356{
357 return pagecache_get_page(mapping, index,
358 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
359 mapping_gfp_mask(mapping),
360 GFP_NOFS);
361}
362
262struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); 363struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
263struct page *find_get_page(struct address_space *mapping, pgoff_t offset);
264struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); 364struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
265struct page *find_lock_page(struct address_space *mapping, pgoff_t offset);
266struct page *find_or_create_page(struct address_space *mapping, pgoff_t index,
267 gfp_t gfp_mask);
268unsigned find_get_entries(struct address_space *mapping, pgoff_t start, 365unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
269 unsigned int nr_entries, struct page **entries, 366 unsigned int nr_entries, struct page **entries,
270 pgoff_t *indices); 367 pgoff_t *indices);
@@ -287,8 +384,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
287 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); 384 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
288} 385}
289 386
290extern struct page * grab_cache_page_nowait(struct address_space *mapping,
291 pgoff_t index);
292extern struct page * read_cache_page(struct address_space *mapping, 387extern struct page * read_cache_page(struct address_space *mapping,
293 pgoff_t index, filler_t *filler, void *data); 388 pgoff_t index, filler_t *filler, void *data);
294extern struct page * read_cache_page_gfp(struct address_space *mapping, 389extern struct page * read_cache_page_gfp(struct address_space *mapping,