Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    21
-rw-r--r--  mm/migrate.c    10
-rw-r--r--  mm/readahead.c  40
-rw-r--r--  mm/swap.c        4
-rw-r--r--  mm/truncate.c   10
-rw-r--r--  mm/vmscan.c      6
6 files changed, 74 insertions, 17 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 126d3973b3d1..fc11974f2bee 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -565,6 +565,24 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 EXPORT_SYMBOL(wait_on_page_bit);
 
 /**
+ * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
+ * @page: Page defining the wait queue of interest
+ * @waiter: Waiter to add to the queue
+ *
+ * Add an arbitrary @waiter to the wait queue for the nominated @page.
+ */
+void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
+{
+	wait_queue_head_t *q = page_waitqueue(page);
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__add_wait_queue(q, waiter);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL_GPL(add_page_wait_queue);
+
+/**
  * unlock_page - unlock a locked page
  * @page: the page
  *
@@ -2463,6 +2481,9 @@ EXPORT_SYMBOL(generic_file_aio_write);
  * (presumably at page->private). If the release was successful, return `1'.
  * Otherwise return zero.
  *
+ * This may also be called if PG_fscache is set on a page, indicating that the
+ * page is known to the local caching routines.
+ *
  * The @gfp_mask argument specifies whether I/O may be performed to release
  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
  *
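
For context, a sketch of how a user of the new add_page_wait_queue() export might monitor a page becoming unlocked. The monitor structure and wake function below are illustrative only, not part of this patch (CacheFiles installs a similar waiter to watch PG_locked on backing pages); the extern declaration itself lives outside the mm/ portion of the series.

#include <linux/pagemap.h>
#include <linux/wait.h>

/* Hypothetical monitor: its wait entry is hooked onto the page's wait
 * queue and is woken from unlock_page() -> wake_up_page(), which passes
 * a wait_bit_key identifying the flags word and bit number. */
struct page_monitor {
	wait_queue_t	wait;
	struct page	*page;
};

static int page_monitor_waiter(wait_queue_t *wait, unsigned mode,
			       int sync, void *_key)
{
	struct page_monitor *pm = container_of(wait, struct page_monitor, wait);
	struct wait_bit_key *key = _key;

	/* ignore wakeups for other pages or other page bits */
	if (key->flags != &pm->page->flags || key->bit_nr != PG_locked)
		return 0;

	list_del_init(&wait->task_list);	/* detach; no task to wake */
	/* ... defer further processing of the now-unlocked page ... */
	return 0;
}

static void watch_page_unlock(struct page_monitor *pm, struct page *page)
{
	pm->page = page;
	init_waitqueue_func_entry(&pm->wait, page_monitor_waiter);
	add_page_wait_queue(page, &pm->wait);
}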
diff --git a/mm/migrate.c b/mm/migrate.c
index a9eff3f092f6..068655d8f883 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -250,7 +250,7 @@ out:
  * The number of remaining references must be:
  * 1 for anonymous pages without a mapping
  * 2 for pages with a mapping
- * 3 for pages with a mapping and PagePrivate set.
+ * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
 static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page)
@@ -270,7 +270,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));
 
-	expected_count = 2 + !!PagePrivate(page);
+	expected_count = 2 + !!page_has_private(page);
 	if (page_count(page) != expected_count ||
 			(struct page *)radix_tree_deref_slot(pslot) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
@@ -386,7 +386,7 @@ EXPORT_SYMBOL(fail_migrate_page);
 
 /*
  * Common logic to directly migrate a single page suitable for
- * pages that do not use PagePrivate.
+ * pages that do not use PagePrivate/PagePrivate2.
  *
  * Pages are locked upon entry and exit.
  */
@@ -522,7 +522,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 	 * Buffers may be managed in a filesystem specific way.
 	 * We must have no buffers or drop them.
 	 */
-	if (PagePrivate(page) &&
+	if (page_has_private(page) &&
 	    !try_to_release_page(page, GFP_KERNEL))
 		return -EAGAIN;
 
@@ -655,7 +655,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	 * free the metadata, so the page can be freed.
 	 */
 	if (!page->mapping) {
-		if (!PageAnon(page) && PagePrivate(page)) {
+		if (!PageAnon(page) && page_has_private(page)) {
 			/*
 			 * Go direct to try_to_free_buffers() here because
 			 * a) that's what try_to_release_page() would do anyway
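
The page_has_private() helper that replaces PagePrivate() throughout comes from the companion page-flags patch in this series; paraphrased here for reference (see include/linux/page-flags.h for the authoritative form), it tests PG_private together with the new PG_private_2 (PG_fscache) bit:

/* Paraphrase of the companion patch: PG_private covers attached
 * buffer_heads/fs-private data, PG_private_2 (aliased as PG_fscache)
 * marks pages known to the local cache. */
#define PAGE_FLAGS_PRIVATE				\
	(1 << PG_private | 1 << PG_private_2)

static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}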
diff --git a/mm/readahead.c b/mm/readahead.c
index 9ce303d4b810..133b6d525513 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -31,6 +31,42 @@ EXPORT_SYMBOL_GPL(file_ra_state_init);
 
 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
 
+/*
+ * see if a page needs releasing upon read_cache_pages() failure
+ * - the caller of read_cache_pages() may have set PG_private or PG_fscache
+ *   before calling, such as the NFS fs marking pages that are cached locally
+ *   on disk, thus we need to give the fs a chance to clean up in the event of
+ *   an error
+ */
+static void read_cache_pages_invalidate_page(struct address_space *mapping,
+					     struct page *page)
+{
+	if (page_has_private(page)) {
+		if (!trylock_page(page))
+			BUG();
+		page->mapping = mapping;
+		do_invalidatepage(page, 0);
+		page->mapping = NULL;
+		unlock_page(page);
+	}
+	page_cache_release(page);
+}
+
+/*
+ * release a list of pages, invalidating them first if need be
+ */
+static void read_cache_pages_invalidate_pages(struct address_space *mapping,
+					      struct list_head *pages)
+{
+	struct page *victim;
+
+	while (!list_empty(pages)) {
+		victim = list_to_page(pages);
+		list_del(&victim->lru);
+		read_cache_pages_invalidate_page(mapping, victim);
+	}
+}
+
 /**
  * read_cache_pages - populate an address space with some pages & start reads against them
  * @mapping: the address_space
@@ -52,14 +88,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping,
 					page->index, GFP_KERNEL)) {
-			page_cache_release(page);
+			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
 		page_cache_release(page);
 
 		ret = filler(data, page);
 		if (unlikely(ret)) {
-			put_pages_list(pages);
+			read_cache_pages_invalidate_pages(mapping, pages);
 			break;
 		}
 		task_io_account_read(PAGE_CACHE_SIZE);
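
To illustrate the error path this hunk changes, here is a hypothetical readpages implementation built on read_cache_pages(); my_readpages() and my_fill_page() are invented names, and the comments describe only what the patched function now guarantees:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Illustrative filler matching read_cache_pages()'s expected signature:
 * start I/O on one page cache page, returning 0 or -Exxx.  A netfs might
 * mark the page PG_fscache here before reading. */
static int my_fill_page(void *data, struct page *page)
{
	struct file *file = data;

	return page->mapping->a_ops->readpage(file, page);
}

static int my_readpages(struct file *file, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	/* On filler failure, the patched read_cache_pages() invalidates
	 * (via do_invalidatepage() when PG_private/PG_fscache is set) and
	 * releases every page still on 'pages', so no cleanup is needed. */
	return read_cache_pages(mapping, pages, my_fill_page, file);
}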
diff --git a/mm/swap.c b/mm/swap.c
index 6e83084c1f6c..bede23ce64ea 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -448,8 +448,8 @@ void pagevec_strip(struct pagevec *pvec)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		if (PagePrivate(page) && trylock_page(page)) {
-			if (PagePrivate(page))
+		if (page_has_private(page) && trylock_page(page)) {
+			if (page_has_private(page))
 				try_to_release_page(page, 0);
 			unlock_page(page);
 		}
diff --git a/mm/truncate.c b/mm/truncate.c
index 1229211104f8..55206fab7b99 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -50,7 +50,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
 	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
-	if (PagePrivate(page))
+	if (page_has_private(page))
 		do_invalidatepage(page, partial);
 }
 
@@ -99,7 +99,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 	if (page->mapping != mapping)
 		return;
 
-	if (PagePrivate(page))
+	if (page_has_private(page))
 		do_invalidatepage(page, 0);
 
 	cancel_dirty_page(page, PAGE_CACHE_SIZE);
@@ -126,7 +126,7 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 	if (page->mapping != mapping)
 		return 0;
 
-	if (PagePrivate(page) && !try_to_release_page(page, 0))
+	if (page_has_private(page) && !try_to_release_page(page, 0))
 		return 0;
 
 	clear_page_mlock(page);
@@ -348,7 +348,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (page->mapping != mapping)
 		return 0;
 
-	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
 	spin_lock_irq(&mapping->tree_lock);
@@ -356,7 +356,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 		goto failed;
 
 	clear_page_mlock(page);
-	BUG_ON(PagePrivate(page));
+	BUG_ON(page_has_private(page));
 	__remove_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
 	page_cache_release(page);	/* pagecache ref */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 06e72693b458..425244988bb2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -283,7 +283,7 @@ static inline int page_mapping_inuse(struct page *page)
 
 static inline int is_page_cache_freeable(struct page *page)
 {
-	return page_count(page) - !!PagePrivate(page) == 2;
+	return page_count(page) - !!page_has_private(page) == 2;
 }
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -367,7 +367,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 	 * Some data journaling orphaned pages can have
 	 * page->mapping == NULL while being dirty with clean buffers.
 	 */
-	if (PagePrivate(page)) {
+	if (page_has_private(page)) {
 		if (try_to_free_buffers(page)) {
 			ClearPageDirty(page);
 			printk("%s: orphaned page\n", __func__);
@@ -727,7 +727,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * process address space (page_count == 1) it can be freed.
 		 * Otherwise, leave the page on the LRU so it is swappable.
 		 */
-		if (PagePrivate(page)) {
+		if (page_has_private(page)) {
 			if (!try_to_release_page(page, sc->gfp_mask))
 				goto activate_locked;
 			if (!mapping && page_count(page) == 1) {
733 if (!mapping && page_count(page) == 1) { 733 if (!mapping && page_count(page) == 1) {