author    Pekka Enberg <penberg@cs.helsinki.fi>    2006-06-23 05:05:08 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 10:43:02 -0400
commit    090d2b185d8680fc26a2eaf4245d4171dcf4baf1 (patch)
tree      67e604e3cd1bad3cfd034bced19a0fbff6a80c30 /fs
parent    c330dda908b5a46469a997eea90b66f2f9f02b34 (diff)
[PATCH] read_mapping_page for address space
Add read_mapping_page() which is used for callers that pass
mapping->a_ops->readpage as the filler for read_cache_page. This removes
some duplication from filesystem code.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
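For reference, the helper itself is introduced outside fs/ (the diffstat below is limited to 'fs'), presumably as a static inline next to read_cache_page() in include/linux/pagemap.h. A minimal sketch of what it amounts to, reconstructed from the conversions in this patch rather than copied from the header, is:

	/*
	 * Sketch of the wrapper this patch relies on; exact placement and
	 * formatting follow include/linux/pagemap.h, not this page.
	 */
	static inline struct page *read_mapping_page(struct address_space *mapping,
						     unsigned long index, void *data)
	{
		filler_t *filler = (filler_t *)mapping->a_ops->readpage;

		return read_cache_page(mapping, index, filler, data);
	}

Each caller below simply drops its open-coded (filler_t *)...->readpage cast and passes NULL as the opaque data argument, exactly as before.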
Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/dir.c              |  4
-rw-r--r--  fs/afs/mntpt.c            | 11
-rw-r--r--  fs/cramfs/inode.c         |  4
-rw-r--r--  fs/ext2/dir.c             |  3
-rw-r--r--  fs/freevxfs/vxfs_subr.c   |  3
-rw-r--r--  fs/hfs/bnode.c            |  2
-rw-r--r--  fs/hfs/btree.c            |  2
-rw-r--r--  fs/hfsplus/bitmap.c       | 15
-rw-r--r--  fs/hfsplus/bnode.c        |  2
-rw-r--r--  fs/hfsplus/btree.c        |  2
-rw-r--r--  fs/jfs/jfs_metapage.c     |  5
-rw-r--r--  fs/minix/dir.c            |  3
-rw-r--r--  fs/namei.c                |  3
-rw-r--r--  fs/ntfs/aops.h            |  3
-rw-r--r--  fs/ntfs/attrib.c          |  6
-rw-r--r--  fs/ntfs/file.c            |  3
-rw-r--r--  fs/ocfs2/symlink.c        |  3
-rw-r--r--  fs/partitions/check.c     |  4
-rw-r--r--  fs/reiserfs/xattr.c       |  3
-rw-r--r--  fs/sysv/dir.c             |  3
20 files changed, 30 insertions, 54 deletions
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index a6dff6a4f20..2fc99877cb0 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -185,9 +185,7 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
 
 	_enter("{%lu},%lu", dir->i_ino, index);
 
-	page = read_cache_page(dir->i_mapping,index,
-				(filler_t *) dir->i_mapping->a_ops->readpage,
-				NULL);
+	page = read_mapping_page(dir->i_mapping, index, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 4e6eeb59b83..b5cf9e1205a 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -63,7 +63,6 @@ unsigned long afs_mntpt_expiry_timeout = 20;
 int afs_mntpt_check_symlink(struct afs_vnode *vnode)
 {
 	struct page *page;
-	filler_t *filler;
 	size_t size;
 	char *buf;
 	int ret;
@@ -71,10 +70,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
 	_enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique);
 
 	/* read the contents of the symlink into the pagecache */
-	filler = (filler_t *) AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage;
-
-	page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
-			       filler, NULL);
+	page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, NULL);
 	if (IS_ERR(page)) {
 		ret = PTR_ERR(page);
 		goto out;
@@ -160,7 +156,6 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 	struct page *page = NULL;
 	size_t size;
 	char *buf, *devname = NULL, *options = NULL;
-	filler_t *filler;
 	int ret;
 
 	kenter("{%s}", mntpt->d_name.name);
@@ -182,9 +177,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 		goto error;
 
 	/* read the contents of the AFS special symlink */
-	filler = (filler_t *)mntpt->d_inode->i_mapping->a_ops->readpage;
-
-	page = read_cache_page(mntpt->d_inode->i_mapping, 0, filler, NULL);
+	page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
 	if (IS_ERR(page)) {
 		ret = PTR_ERR(page);
 		goto error;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 8a9d5d3b326..c45d7386080 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -181,9 +181,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 		struct page *page = NULL;
 
 		if (blocknr + i < devsize) {
-			page = read_cache_page(mapping, blocknr + i,
-				(filler_t *)mapping->a_ops->readpage,
-				NULL);
+			page = read_mapping_page(mapping, blocknr + i, NULL);
 			/* synchronous error? */
 			if (IS_ERR(page))
 				page = NULL;
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index d672aa9f406..3c1c9aaaca6 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -159,8 +159,7 @@ fail:
 static struct page * ext2_get_page(struct inode *dir, unsigned long n)
 {
 	struct address_space *mapping = dir->i_mapping;
-	struct page *page = read_cache_page(mapping, n,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 50aae77651b..c1be118fc06 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -71,8 +71,7 @@ vxfs_get_page(struct address_space *mapping, u_long n)
 {
 	struct page *	pp;
 
-	pp = read_cache_page(mapping, n,
-			(filler_t*)mapping->a_ops->readpage, NULL);
+	pp = read_mapping_page(mapping, n, NULL);
 
 	if (!IS_ERR(pp)) {
 		wait_on_page_locked(pp);
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 1e44dcfe49c..13231dd5ce6 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -280,7 +280,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	block = off >> PAGE_CACHE_SHIFT;
 	node->page_offset = off & ~PAGE_CACHE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; i++) {
-		page = read_cache_page(mapping, block++, (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, block++, NULL);
 		if (IS_ERR(page))
 			goto fail;
 		if (PageError(page)) {
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index d20131ce4b9..40035799431 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -59,7 +59,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	unlock_new_inode(tree->inode);
 
 	mapping = tree->inode->i_mapping;
-	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
 		goto free_tree;
 
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index 9fb51632303..d128a25b74d 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -31,8 +31,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
 	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
 	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
 	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
-	page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
-			       (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
 	pptr = kmap(page);
 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
 	i = offset % 32;
@@ -72,8 +71,8 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
 		offset += PAGE_CACHE_BITS;
 		if (offset >= size)
 			break;
-		page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
-				       (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+					 NULL);
 		curr = pptr = kmap(page);
 		if ((size ^ offset) / PAGE_CACHE_BITS)
 			end = pptr + PAGE_CACHE_BITS / 32;
@@ -119,8 +118,8 @@ found:
 	set_page_dirty(page);
 	kunmap(page);
 	offset += PAGE_CACHE_BITS;
-	page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
-			       (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+				 NULL);
 	pptr = kmap(page);
 	curr = pptr;
 	end = pptr + PAGE_CACHE_BITS / 32;
@@ -167,7 +166,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
 	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
 	pnr = offset / PAGE_CACHE_BITS;
-	page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, pnr, NULL);
 	pptr = kmap(page);
 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
 	end = pptr + PAGE_CACHE_BITS / 32;
@@ -199,7 +198,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 			break;
 		set_page_dirty(page);
 		kunmap(page);
-		page = read_cache_page(mapping, ++pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, ++pnr, NULL);
 		pptr = kmap(page);
 		curr = pptr;
 		end = pptr + PAGE_CACHE_BITS / 32;
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 746abc9ecf7..77bf434da67 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -440,7 +440,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	block = off >> PAGE_CACHE_SHIFT;
 	node->page_offset = off & ~PAGE_CACHE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
-		page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, block, NULL);
 		if (IS_ERR(page))
 			goto fail;
 		if (PageError(page)) {
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index effa8991999..cfc852fdd1b 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -38,7 +38,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 		goto free_tree;
 
 	mapping = tree->inode->i_mapping;
-	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
 		goto free_tree;
 
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 2b220dd6b4e..7f6e8803970 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -632,10 +632,9 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
 		}
 		SetPageUptodate(page);
 	} else {
-		page = read_cache_page(mapping, page_index,
-				       (filler_t *)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, page_index, NULL);
 		if (IS_ERR(page) || !PageUptodate(page)) {
-			jfs_err("read_cache_page failed!");
+			jfs_err("read_mapping_page failed!");
 			return NULL;
 		}
 		lock_page(page);
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 69224d1fe04..2b0a389d198 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -60,8 +60,7 @@ static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
 static struct page * dir_get_page(struct inode *dir, unsigned long n)
 {
 	struct address_space *mapping = dir->i_mapping;
-	struct page *page = read_cache_page(mapping, n,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);
diff --git a/fs/namei.c b/fs/namei.c
index 184fe4acf82..bb4a3e40e43 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2577,8 +2577,7 @@ static char *page_getlink(struct dentry * dentry, struct page **ppage)
 {
 	struct page * page;
 	struct address_space *mapping = dentry->d_inode->i_mapping;
-	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage,
-				NULL);
+	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
 		goto sync_fail;
 	wait_on_page_locked(page);
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 3b74e66ca2f..325ce261a10 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -86,8 +86,7 @@ static inline void ntfs_unmap_page(struct page *page)
 static inline struct page *ntfs_map_page(struct address_space *mapping,
 		unsigned long index)
 {
-	struct page *page = read_cache_page(mapping, index,
-			(filler_t*)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, index, NULL);
 
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 1663f5c3c6a..6708e1d68a9 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -2529,8 +2529,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 	end >>= PAGE_CACHE_SHIFT;
 	/* If there is a first partial page, need to do it the slow way. */
 	if (start_ofs) {
-		page = read_cache_page(mapping, idx,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, idx, NULL);
 		if (IS_ERR(page)) {
 			ntfs_error(vol->sb, "Failed to read first partial "
 					"page (sync error, index 0x%lx).", idx);
@@ -2600,8 +2599,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 	}
 	/* If there is a last partial page, need to do it the slow way. */
 	if (end_ofs) {
-		page = read_cache_page(mapping, idx,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, idx, NULL);
 		if (IS_ERR(page)) {
 			ntfs_error(vol->sb, "Failed to read last partial page "
 					"(sync error, index 0x%lx).", idx);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 36e1e136bb0..88292f9e4b9 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -231,8 +231,7 @@ do_non_resident_extend:
 		 * Read the page. If the page is not present, this will zero
 		 * the uninitialized regions for us.
 		 */
-		page = read_cache_page(mapping, index,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+		page = read_mapping_page(mapping, index, NULL);
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
 			goto init_err_out;
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index f6986bd79e7..0c8a1294ec9 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -64,8 +64,7 @@ static char *ocfs2_page_getlink(struct dentry * dentry,
 {
 	struct page * page;
 	struct address_space *mapping = dentry->d_inode->i_mapping;
-	page = read_cache_page(mapping, 0,
-			       (filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
 		goto sync_fail;
 	wait_on_page_locked(page);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 8851b81e7c5..cd885b23cb5 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -499,8 +499,8 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
 	struct page *page;
 
-	page = read_cache_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
-			(filler_t *)mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+				 NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		if (!PageUptodate(page))
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ffb79c48c5b..39fedaa88a0 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -452,8 +452,7 @@ static struct page *reiserfs_get_page(struct inode *dir, unsigned long n)
 	/* We can deadlock if we try to free dentries,
 	   and an unlink/rmdir has just occured - GFP_NOFS avoids this */
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
-	page = read_cache_page(mapping, n,
-			       (filler_t *) mapping->a_ops->readpage, NULL);
+	page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index d7074341ee8..f2bef962d30 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -53,8 +53,7 @@ static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
 static struct page * dir_get_page(struct inode *dir, unsigned long n)
 {
 	struct address_space *mapping = dir->i_mapping;
-	struct page *page = read_cache_page(mapping, n,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		kmap(page);