author    Ingo Molnar <mingo@elte.hu>    2008-10-28 11:54:49 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-10-28 11:54:49 -0400
commit d1a76187a5be4f89c6cb19d800cb5fb7aac735c5 (patch)
tree   2fac3ffbfffc7560eeef8364b541d0d7a0057920 /mm/filemap.c
parent c7e78cff6b7518212247fb20b1dc6411540dc9af (diff)
parent 0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into core/locking

Conflicts:
        arch/um/include/asm/system.h
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  |  55
1 file changed, 34 insertions(+), 21 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 876bc595d0f8..ab8553658af3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,6 +33,7 @@
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
+#include <linux/mm_inline.h> /* for page_is_file_cache() */
 #include "internal.h"
 
 /*
@@ -115,12 +116,12 @@ void __remove_from_page_cache(struct page *page)
 {
         struct address_space *mapping = page->mapping;
 
-        mem_cgroup_uncharge_cache_page(page);
         radix_tree_delete(&mapping->page_tree, page->index);
         page->mapping = NULL;
         mapping->nrpages--;
         __dec_zone_page_state(page, NR_FILE_PAGES);
         BUG_ON(page_mapped(page));
+        mem_cgroup_uncharge_cache_page(page);
 
         /*
          * Some filesystems seem to re-dirty the page even after
@@ -492,9 +493,24 @@ EXPORT_SYMBOL(add_to_page_cache_locked);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                 pgoff_t offset, gfp_t gfp_mask)
 {
-        int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
-        if (ret == 0)
-                lru_cache_add(page);
+        int ret;
+
+        /*
+         * Splice_read and readahead add shmem/tmpfs pages into the page cache
+         * before shmem_readpage has a chance to mark them as SwapBacked: they
+         * need to go on the active_anon lru below, and mem_cgroup_cache_charge
+         * (called in add_to_page_cache) needs to know where they're going too.
+         */
+        if (mapping_cap_swap_backed(mapping))
+                SetPageSwapBacked(page);
+
+        ret = add_to_page_cache(page, mapping, offset, gfp_mask);
+        if (ret == 0) {
+                if (page_is_file_cache(page))
+                        lru_cache_add_file(page);
+                else
+                        lru_cache_add_active_anon(page);
+        }
         return ret;
 }
 
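The new branch matters to callers that insert pages before the filesystem has seen them, which the comment above calls out for splice_read and readahead. As a rough illustration, a readahead-style caller might look like the sketch below; the helper name and error handling are hypothetical and only show where add_to_page_cache_lru() now makes the LRU and memcg decision.

#include <linux/pagemap.h>
#include <linux/gfp.h>

/* Hypothetical readahead-style caller: it allocates a page and lets
 * add_to_page_cache_lru() charge it and pick the LRU list. For a
 * tmpfs/shmem mapping, PG_swapbacked is now set before the charge,
 * so the page lands on the active_anon list rather than the file list. */
static int example_add_one_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page = page_cache_alloc_cold(mapping);
        int ret;

        if (!page)
                return -ENOMEM;

        ret = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
        if (ret)
                page_cache_release(page);
        return ret;
}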
@@ -557,17 +573,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * mechananism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
- * The first mb is necessary to safely close the critical section opened by the
- * test_and_set_bit() to lock the page; the second mb is necessary to enforce
- * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
- * races with a parallel wait_on_page_locked()).
+ * The mb is necessary to enforce ordering between the clear_bit and the read
+ * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
-        smp_mb__before_clear_bit();
-        if (!test_and_clear_bit(PG_locked, &page->flags))
-                BUG();
-        smp_mb__after_clear_bit();
+        VM_BUG_ON(!PageLocked(page));
+        clear_bit_unlock(PG_locked, &page->flags);
+        smp_mb__after_clear_bit();
         wake_up_page(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
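The rewrite can drop the first barrier because clear_bit_unlock() has release semantics and pairs with the acquire in the lock path; only the barrier ordering the bit clear against the wait-queue check in wake_up_page() remains. Below is a minimal sketch of the assumed lock-side counterpart, roughly what trylock_page() does in <linux/pagemap.h>; it is shown for context and is not taken from this hunk.

#include <linux/pagemap.h>
#include <linux/page-flags.h>

/* Assumed lock-side pairing: taking PG_locked with acquire semantics
 * means the clear_bit_unlock() in unlock_page() is enough to close the
 * critical section without a separate smp_mb__before_clear_bit(). */
static inline int example_trylock_page(struct page *page)
{
        return !test_and_set_bit_lock(PG_locked, &page->flags);
}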
@@ -1100,8 +1113,9 @@ page_ok:
 
 page_not_up_to_date:
         /* Get exclusive access to the page ... */
-        if (lock_page_killable(page))
-                goto readpage_eio;
+        error = lock_page_killable(page);
+        if (unlikely(error))
+                goto readpage_error;
 
 page_not_up_to_date_locked:
         /* Did it get truncated before we got the lock? */
@@ -1130,8 +1144,9 @@ readpage:
         }
 
         if (!PageUptodate(page)) {
-                if (lock_page_killable(page))
-                        goto readpage_eio;
+                error = lock_page_killable(page);
+                if (unlikely(error))
+                        goto readpage_error;
                 if (!PageUptodate(page)) {
                         if (page->mapping == NULL) {
                                 /*
@@ -1143,15 +1158,14 @@ readpage:
                         }
                         unlock_page(page);
                         shrink_readahead_size_eio(filp, ra);
-                        goto readpage_eio;
+                        error = -EIO;
+                        goto readpage_error;
                 }
                 unlock_page(page);
         }
 
         goto page_ok;
 
-readpage_eio:
-        error = -EIO;
 readpage_error:
         /* UHHUH! A synchronous read error occurred. Report it */
         desc->error = error;
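With the readpage_eio label gone, whatever lock_page_killable() returns (typically -EINTR when a fatal signal interrupts the wait) reaches readpage_error unchanged, and -EIO is now reserved for a genuine read failure. For context, the helper whose return value propagates has roughly the shape sketched below; this is an assumption based on <linux/pagemap.h> of that era, not part of this patch.

#include <linux/pagemap.h>

/* Assumed shape of lock_page_killable(): 0 once the page is locked,
 * or an errno such as -EINTR if a fatal signal arrives while sleeping.
 * That errno now ends up in desc->error instead of being folded into -EIO. */
static inline int example_lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}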
@@ -1186,8 +1200,7 @@ out:
         ra->prev_pos |= prev_offset;
 
         *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
-        if (filp)
-                file_accessed(filp);
+        file_accessed(filp);
 }
 
 int file_read_actor(read_descriptor_t *desc, struct page *page,