 fs/ext2/xip.c    | 81
 mm/filemap_xip.c | 23
 2 files changed, 55 insertions(+), 49 deletions(-)
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index d44431d1a338..0aa5ac159c09 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -15,66 +15,79 @@
 #include "xip.h"
 
 static inline int
-__inode_direct_access(struct inode *inode, sector_t sector, unsigned long *data) {
+__inode_direct_access(struct inode *inode, sector_t sector,
+		      unsigned long *data)
+{
 	BUG_ON(!inode->i_sb->s_bdev->bd_disk->fops->direct_access);
 	return inode->i_sb->s_bdev->bd_disk->fops
 		->direct_access(inode->i_sb->s_bdev,sector,data);
 }
 
+static inline int
+__ext2_get_sector(struct inode *inode, sector_t offset, int create,
+		  sector_t *result)
+{
+	struct buffer_head tmp;
+	int rc;
+
+	memset(&tmp, 0, sizeof(struct buffer_head));
+	rc = ext2_get_block(inode, offset/ (PAGE_SIZE/512), &tmp,
+			    create);
+	*result = tmp.b_blocknr;
+
+	/* did we get a sparse block (hole in the file)? */
+	if (!(*result)) {
+		BUG_ON(create);
+		rc = -ENODATA;
+	}
+
+	return rc;
+}
+
 int
-ext2_clear_xip_target(struct inode *inode, int block) {
-	sector_t sector = block*(PAGE_SIZE/512);
+ext2_clear_xip_target(struct inode *inode, int block)
+{
+	sector_t sector = block * (PAGE_SIZE/512);
 	unsigned long data;
 	int rc;
 
 	rc = __inode_direct_access(inode, sector, &data);
-	if (rc)
-		return rc;
-	clear_page((void*)data);
-	return 0;
+	if (!rc)
+		clear_page((void*)data);
+	return rc;
 }
 
 void ext2_xip_verify_sb(struct super_block *sb)
 {
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
 
-	if ((sbi->s_mount_opt & EXT2_MOUNT_XIP)) {
-		if ((sb->s_bdev == NULL) ||
-		    sb->s_bdev->bd_disk == NULL ||
-		    sb->s_bdev->bd_disk->fops == NULL ||
-		    sb->s_bdev->bd_disk->fops->direct_access == NULL) {
-			sbi->s_mount_opt &= (~EXT2_MOUNT_XIP);
-			ext2_warning(sb, __FUNCTION__,
-				     "ignoring xip option - not supported by bdev");
-		}
+	if ((sbi->s_mount_opt & EXT2_MOUNT_XIP) &&
+	    !sb->s_bdev->bd_disk->fops->direct_access) {
+		sbi->s_mount_opt &= (~EXT2_MOUNT_XIP);
+		ext2_warning(sb, __FUNCTION__,
+			     "ignoring xip option - not supported by bdev");
 	}
 }
 
-struct page*
-ext2_get_xip_page(struct address_space *mapping, sector_t blockno,
+struct page *
+ext2_get_xip_page(struct address_space *mapping, sector_t offset,
 		   int create)
 {
 	int rc;
 	unsigned long data;
-	struct buffer_head tmp;
+	sector_t sector;
 
-	tmp.b_state = 0;
-	tmp.b_blocknr = 0;
-	rc = ext2_get_block(mapping->host, blockno/(PAGE_SIZE/512) , &tmp,
-				create);
+	/* first, retrieve the sector number */
+	rc = __ext2_get_sector(mapping->host, offset, create, &sector);
 	if (rc)
-		return ERR_PTR(rc);
-	if (tmp.b_blocknr == 0) {
-		/* SPARSE block */
-		BUG_ON(create);
-		return ERR_PTR(-ENODATA);
-	}
+		goto error;
 
+	/* retrieve address of the target data */
 	rc = __inode_direct_access
-		(mapping->host,tmp.b_blocknr*(PAGE_SIZE/512) ,&data);
-	if (rc)
-		return ERR_PTR(rc);
+		(mapping->host, sector * (PAGE_SIZE/512), &data);
+	if (!rc)
+		return virt_to_page(data);
 
-	SetPageUptodate(virt_to_page(data));
-	return virt_to_page(data);
+error:
+	return ERR_PTR(rc);
 }
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 4553b2c5aab4..8c199f537732 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -68,13 +68,12 @@ do_xip_mapping_read(struct address_space *mapping,
 		if (unlikely(IS_ERR(page))) {
 			if (PTR_ERR(page) == -ENODATA) {
 				/* sparse */
-				page = virt_to_page(empty_zero_page);
+				page = ZERO_PAGE(0);
 			} else {
 				desc->error = PTR_ERR(page);
 				goto out;
 			}
-		} else
-			BUG_ON(!PageUptodate(page));
+		}
 
 		/* If users can be writing to this page using arbitrary
 		 * virtual addresses, take care about potential aliasing
@@ -84,8 +83,7 @@ do_xip_mapping_read(struct address_space *mapping,
 		flush_dcache_page(page);
 
 		/*
-		 * Ok, we have the page, and it's up-to-date, so
-		 * now we can copy it to user space...
+		 * Ok, we have the page, so now we can copy it to user space...
 		 *
 		 * The actor routine returns how many bytes were actually used..
 		 * NOTE! This may not be the same as how much of a user buffer
@@ -164,7 +162,7 @@ EXPORT_SYMBOL_GPL(xip_file_sendfile);
  * xip_write
  *
  * This function walks all vmas of the address_space and unmaps the
- * empty_zero_page when found at pgoff. Should it go in rmap.c?
+ * ZERO_PAGE when found at pgoff. Should it go in rmap.c?
  */
 static void
 __xip_unmap (struct address_space * mapping,
@@ -187,7 +185,7 @@ __xip_unmap (struct address_space * mapping,
 		 * We need the page_table_lock to protect us from page faults,
 		 * munmap, fork, etc...
 		 */
-		pte = page_check_address(virt_to_page(empty_zero_page), mm,
+		pte = page_check_address(ZERO_PAGE(address), mm,
 					 address);
 		if (!IS_ERR(pte)) {
 			/* Nuke the page table entry. */
@@ -230,7 +228,6 @@ xip_file_nopage(struct vm_area_struct * area,
 
 	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
 	if (!IS_ERR(page)) {
-		BUG_ON(!PageUptodate(page));
 		return page;
 	}
 	if (PTR_ERR(page) != -ENODATA)
@@ -245,12 +242,11 @@ xip_file_nopage(struct vm_area_struct * area,
 				pgoff*(PAGE_SIZE/512), 1);
 		if (IS_ERR(page))
 			return NULL;
-		BUG_ON(!PageUptodate(page));
 		/* unmap page at pgoff from all other vmas */
 		__xip_unmap(mapping, pgoff);
 	} else {
-		/* not shared and writable, use empty_zero_page */
-		page = virt_to_page(empty_zero_page);
+		/* not shared and writable, use ZERO_PAGE() */
+		page = ZERO_PAGE(address);
 	}
 
 	return page;
@@ -319,8 +315,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
 			break;
 		}
 
-		BUG_ON(!PageUptodate(page));
-
 		copied = filemap_copy_from_user(page, offset, buf, bytes);
 		flush_dcache_page(page);
 		if (likely(copied > 0)) {
@@ -435,8 +429,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 			return 0;
 		else
 			return PTR_ERR(page);
-	} else
-		BUG_ON(!PageUptodate(page));
+	}
 	kaddr = kmap_atomic(page, KM_USER0);
 	memset(kaddr + offset, 0, length);
 	kunmap_atomic(kaddr, KM_USER0);