about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Nick Piggin <npiggin@suse.de>	2008-04-28 05:13:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 11:58:23 -0400
commit	70688e4dd1647f0ceb502bbd5964fa344c5eb411 (patch)
tree	e0bd8c3b4b6050c067a453d800c2e87948d1abaf
parent	30afcb4bd2762fa4b87b17ada9500aa46dc10b1b (diff)
xip: support non-struct page backed memory
Convert XIP to support non-struct page backed memory, using VM_MIXEDMAP for
the user mappings.

This requires the get_xip_page API to be changed to an address based one.
Improve the API layering a little bit too, while we're here.

This is required in order to support XIP filesystems on memory that isn't
backed with struct page (but memory with struct page is still supported too).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Cc: Jared Hulbert <jaredeh@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	fs/ext2/inode.c	2
-rw-r--r--	fs/ext2/xip.c	37
-rw-r--r--	fs/ext2/xip.h	9
-rw-r--r--	fs/open.c	2
-rw-r--r--	include/linux/fs.h	4
-rw-r--r--	mm/fadvise.c	2
-rw-r--r--	mm/filemap_xip.c	200
-rw-r--r--	mm/madvise.c	2
8 files changed, 126 insertions(+), 132 deletions(-)
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index b8a2990bab8..687023bdfd1 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -796,7 +796,7 @@ const struct address_space_operations ext2_aops = {
796 796
797const struct address_space_operations ext2_aops_xip = { 797const struct address_space_operations ext2_aops_xip = {
798 .bmap = ext2_bmap, 798 .bmap = ext2_bmap,
799 .get_xip_page = ext2_get_xip_page, 799 .get_xip_mem = ext2_get_xip_mem,
800}; 800};
801 801
802const struct address_space_operations ext2_nobh_aops = { 802const struct address_space_operations ext2_nobh_aops = {
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index 430b4c8ee97..233f7fdbe31 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -15,26 +15,28 @@
15#include "xip.h" 15#include "xip.h"
16 16
17static inline int 17static inline int
18__inode_direct_access(struct inode *inode, sector_t sector, 18__inode_direct_access(struct inode *inode, sector_t block,
19 void **kaddr, unsigned long *pfn) 19 void **kaddr, unsigned long *pfn)
20{ 20{
21 struct block_device *bdev = inode->i_sb->s_bdev; 21 struct block_device *bdev = inode->i_sb->s_bdev;
22 struct block_device_operations *ops = bdev->bd_disk->fops; 22 struct block_device_operations *ops = bdev->bd_disk->fops;
23 sector_t sector;
24
25 sector = block * (PAGE_SIZE / 512); /* ext2 block to bdev sector */
23 26
24 BUG_ON(!ops->direct_access); 27 BUG_ON(!ops->direct_access);
25 return ops->direct_access(bdev, sector, kaddr, pfn); 28 return ops->direct_access(bdev, sector, kaddr, pfn);
26} 29}
27 30
28static inline int 31static inline int
29__ext2_get_sector(struct inode *inode, sector_t offset, int create, 32__ext2_get_block(struct inode *inode, pgoff_t pgoff, int create,
30 sector_t *result) 33 sector_t *result)
31{ 34{
32 struct buffer_head tmp; 35 struct buffer_head tmp;
33 int rc; 36 int rc;
34 37
35 memset(&tmp, 0, sizeof(struct buffer_head)); 38 memset(&tmp, 0, sizeof(struct buffer_head));
36 rc = ext2_get_block(inode, offset/ (PAGE_SIZE/512), &tmp, 39 rc = ext2_get_block(inode, pgoff, &tmp, create);
37 create);
38 *result = tmp.b_blocknr; 40 *result = tmp.b_blocknr;
39 41
40 /* did we get a sparse block (hole in the file)? */ 42 /* did we get a sparse block (hole in the file)? */
@@ -47,14 +49,13 @@ __ext2_get_sector(struct inode *inode, sector_t offset, int create,
47} 49}
48 50
49int 51int
50ext2_clear_xip_target(struct inode *inode, int block) 52ext2_clear_xip_target(struct inode *inode, sector_t block)
51{ 53{
52 sector_t sector = block * (PAGE_SIZE/512);
53 void *kaddr; 54 void *kaddr;
54 unsigned long pfn; 55 unsigned long pfn;
55 int rc; 56 int rc;
56 57
57 rc = __inode_direct_access(inode, sector, &kaddr, &pfn); 58 rc = __inode_direct_access(inode, block, &kaddr, &pfn);
58 if (!rc) 59 if (!rc)
59 clear_page(kaddr); 60 clear_page(kaddr);
60 return rc; 61 return rc;
@@ -72,26 +73,18 @@ void ext2_xip_verify_sb(struct super_block *sb)
72 } 73 }
73} 74}
74 75
75struct page * 76int ext2_get_xip_mem(struct address_space *mapping, pgoff_t pgoff, int create,
76ext2_get_xip_page(struct address_space *mapping, sector_t offset, 77 void **kmem, unsigned long *pfn)
77 int create)
78{ 78{
79 int rc; 79 int rc;
80 void *kaddr; 80 sector_t block;
81 unsigned long pfn;
82 sector_t sector;
83 81
84 /* first, retrieve the sector number */ 82 /* first, retrieve the sector number */
85 rc = __ext2_get_sector(mapping->host, offset, create, &sector); 83 rc = __ext2_get_block(mapping->host, pgoff, create, &block);
86 if (rc) 84 if (rc)
87 goto error; 85 return rc;
88 86
89 /* retrieve address of the target data */ 87 /* retrieve address of the target data */
90 rc = __inode_direct_access 88 rc = __inode_direct_access(mapping->host, block, kmem, pfn);
91 (mapping->host, sector * (PAGE_SIZE/512), &kaddr, &pfn); 89 return rc;
92 if (!rc)
93 return pfn_to_page(pfn);
94
95 error:
96 return ERR_PTR(rc);
97} 90}
diff --git a/fs/ext2/xip.h b/fs/ext2/xip.h
index aa85331d6c5..18b34d2f31b 100644
--- a/fs/ext2/xip.h
+++ b/fs/ext2/xip.h
@@ -7,19 +7,20 @@
7 7
8#ifdef CONFIG_EXT2_FS_XIP 8#ifdef CONFIG_EXT2_FS_XIP
9extern void ext2_xip_verify_sb (struct super_block *); 9extern void ext2_xip_verify_sb (struct super_block *);
10extern int ext2_clear_xip_target (struct inode *, int); 10extern int ext2_clear_xip_target (struct inode *, sector_t);
11 11
12static inline int ext2_use_xip (struct super_block *sb) 12static inline int ext2_use_xip (struct super_block *sb)
13{ 13{
14 struct ext2_sb_info *sbi = EXT2_SB(sb); 14 struct ext2_sb_info *sbi = EXT2_SB(sb);
15 return (sbi->s_mount_opt & EXT2_MOUNT_XIP); 15 return (sbi->s_mount_opt & EXT2_MOUNT_XIP);
16} 16}
17struct page* ext2_get_xip_page (struct address_space *, sector_t, int); 17int ext2_get_xip_mem(struct address_space *, pgoff_t, int,
18#define mapping_is_xip(map) unlikely(map->a_ops->get_xip_page) 18 void **, unsigned long *);
19#define mapping_is_xip(map) unlikely(map->a_ops->get_xip_mem)
19#else 20#else
20#define mapping_is_xip(map) 0 21#define mapping_is_xip(map) 0
21#define ext2_xip_verify_sb(sb) do { } while (0) 22#define ext2_xip_verify_sb(sb) do { } while (0)
22#define ext2_use_xip(sb) 0 23#define ext2_use_xip(sb) 0
23#define ext2_clear_xip_target(inode, chain) 0 24#define ext2_clear_xip_target(inode, chain) 0
24#define ext2_get_xip_page NULL 25#define ext2_get_xip_mem NULL
25#endif 26#endif
diff --git a/fs/open.c b/fs/open.c
index b70e7666bb2..7af1f05d597 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -837,7 +837,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
837 if (f->f_flags & O_DIRECT) { 837 if (f->f_flags & O_DIRECT) {
838 if (!f->f_mapping->a_ops || 838 if (!f->f_mapping->a_ops ||
839 ((!f->f_mapping->a_ops->direct_IO) && 839 ((!f->f_mapping->a_ops->direct_IO) &&
840 (!f->f_mapping->a_ops->get_xip_page))) { 840 (!f->f_mapping->a_ops->get_xip_mem))) {
841 fput(f); 841 fput(f);
842 f = ERR_PTR(-EINVAL); 842 f = ERR_PTR(-EINVAL);
843 } 843 }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bd05f567804..2c925747bc4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -474,8 +474,8 @@ struct address_space_operations {
474 int (*releasepage) (struct page *, gfp_t); 474 int (*releasepage) (struct page *, gfp_t);
475 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, 475 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
476 loff_t offset, unsigned long nr_segs); 476 loff_t offset, unsigned long nr_segs);
477 struct page* (*get_xip_page)(struct address_space *, sector_t, 477 int (*get_xip_mem)(struct address_space *, pgoff_t, int,
478 int); 478 void **, unsigned long *);
479 /* migrate the contents of a page to the specified target */ 479 /* migrate the contents of a page to the specified target */
480 int (*migratepage) (struct address_space *, 480 int (*migratepage) (struct address_space *,
481 struct page *, struct page *); 481 struct page *, struct page *);
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 3c0f1e99f5e..343cfdfebd9 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -49,7 +49,7 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
49 goto out; 49 goto out;
50 } 50 }
51 51
52 if (mapping->a_ops->get_xip_page) { 52 if (mapping->a_ops->get_xip_mem) {
53 switch (advice) { 53 switch (advice) {
54 case POSIX_FADV_NORMAL: 54 case POSIX_FADV_NORMAL:
55 case POSIX_FADV_RANDOM: 55 case POSIX_FADV_RANDOM:
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 5e598c42afd..3e744abcce9 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -15,6 +15,7 @@
15#include <linux/rmap.h> 15#include <linux/rmap.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18#include <asm/io.h>
18 19
19/* 20/*
20 * We do use our own empty page to avoid interference with other users 21 * We do use our own empty page to avoid interference with other users
@@ -42,37 +43,41 @@ static struct page *xip_sparse_page(void)
42 43
43/* 44/*
44 * This is a file read routine for execute in place files, and uses 45 * This is a file read routine for execute in place files, and uses
45 * the mapping->a_ops->get_xip_page() function for the actual low-level 46 * the mapping->a_ops->get_xip_mem() function for the actual low-level
46 * stuff. 47 * stuff.
47 * 48 *
48 * Note the struct file* is not used at all. It may be NULL. 49 * Note the struct file* is not used at all. It may be NULL.
49 */ 50 */
50static void 51static ssize_t
51do_xip_mapping_read(struct address_space *mapping, 52do_xip_mapping_read(struct address_space *mapping,
52 struct file_ra_state *_ra, 53 struct file_ra_state *_ra,
53 struct file *filp, 54 struct file *filp,
54 loff_t *ppos, 55 char __user *buf,
55 read_descriptor_t *desc, 56 size_t len,
56 read_actor_t actor) 57 loff_t *ppos)
57{ 58{
58 struct inode *inode = mapping->host; 59 struct inode *inode = mapping->host;
59 pgoff_t index, end_index; 60 pgoff_t index, end_index;
60 unsigned long offset; 61 unsigned long offset;
61 loff_t isize; 62 loff_t isize, pos;
63 size_t copied = 0, error = 0;
62 64
63 BUG_ON(!mapping->a_ops->get_xip_page); 65 BUG_ON(!mapping->a_ops->get_xip_mem);
64 66
65 index = *ppos >> PAGE_CACHE_SHIFT; 67 pos = *ppos;
66 offset = *ppos & ~PAGE_CACHE_MASK; 68 index = pos >> PAGE_CACHE_SHIFT;
69 offset = pos & ~PAGE_CACHE_MASK;
67 70
68 isize = i_size_read(inode); 71 isize = i_size_read(inode);
69 if (!isize) 72 if (!isize)
70 goto out; 73 goto out;
71 74
72 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 75 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
73 for (;;) { 76 do {
74 struct page *page; 77 unsigned long nr, left;
75 unsigned long nr, ret; 78 void *xip_mem;
79 unsigned long xip_pfn;
80 int zero = 0;
76 81
77 /* nr is the maximum number of bytes to copy from this page */ 82 /* nr is the maximum number of bytes to copy from this page */
78 nr = PAGE_CACHE_SIZE; 83 nr = PAGE_CACHE_SIZE;
@@ -85,19 +90,17 @@ do_xip_mapping_read(struct address_space *mapping,
85 } 90 }
86 } 91 }
87 nr = nr - offset; 92 nr = nr - offset;
93 if (nr > len)
94 nr = len;
88 95
89 page = mapping->a_ops->get_xip_page(mapping, 96 error = mapping->a_ops->get_xip_mem(mapping, index, 0,
90 index*(PAGE_SIZE/512), 0); 97 &xip_mem, &xip_pfn);
91 if (!page) 98 if (unlikely(error)) {
92 goto no_xip_page; 99 if (error == -ENODATA) {
93 if (unlikely(IS_ERR(page))) {
94 if (PTR_ERR(page) == -ENODATA) {
95 /* sparse */ 100 /* sparse */
96 page = ZERO_PAGE(0); 101 zero = 1;
97 } else { 102 } else
98 desc->error = PTR_ERR(page);
99 goto out; 103 goto out;
100 }
101 } 104 }
102 105
103 /* If users can be writing to this page using arbitrary 106 /* If users can be writing to this page using arbitrary
@@ -105,10 +108,10 @@ do_xip_mapping_read(struct address_space *mapping,
105 * before reading the page on the kernel side. 108 * before reading the page on the kernel side.
106 */ 109 */
107 if (mapping_writably_mapped(mapping)) 110 if (mapping_writably_mapped(mapping))
108 flush_dcache_page(page); 111 /* address based flush */ ;
109 112
110 /* 113 /*
111 * Ok, we have the page, so now we can copy it to user space... 114 * Ok, we have the mem, so now we can copy it to user space...
112 * 115 *
113 * The actor routine returns how many bytes were actually used.. 116 * The actor routine returns how many bytes were actually used..
114 * NOTE! This may not be the same as how much of a user buffer 117 * NOTE! This may not be the same as how much of a user buffer
@@ -116,47 +119,38 @@ do_xip_mapping_read(struct address_space *mapping,
116 * "pos" here (the actor routine has to update the user buffer 119 * "pos" here (the actor routine has to update the user buffer
117 * pointers and the remaining count). 120 * pointers and the remaining count).
118 */ 121 */
119 ret = actor(desc, page, offset, nr); 122 if (!zero)
120 offset += ret; 123 left = __copy_to_user(buf+copied, xip_mem+offset, nr);
121 index += offset >> PAGE_CACHE_SHIFT; 124 else
122 offset &= ~PAGE_CACHE_MASK; 125 left = __clear_user(buf + copied, nr);
123 126
124 if (ret == nr && desc->count) 127 if (left) {
125 continue; 128 error = -EFAULT;
126 goto out; 129 goto out;
130 }
127 131
128no_xip_page: 132 copied += (nr - left);
129 /* Did not get the page. Report it */ 133 offset += (nr - left);
130 desc->error = -EIO; 134 index += offset >> PAGE_CACHE_SHIFT;
131 goto out; 135 offset &= ~PAGE_CACHE_MASK;
132 } 136 } while (copied < len);
133 137
134out: 138out:
135 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; 139 *ppos = pos + copied;
136 if (filp) 140 if (filp)
137 file_accessed(filp); 141 file_accessed(filp);
142
143 return (copied ? copied : error);
138} 144}
139 145
140ssize_t 146ssize_t
141xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) 147xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
142{ 148{
143 read_descriptor_t desc;
144
145 if (!access_ok(VERIFY_WRITE, buf, len)) 149 if (!access_ok(VERIFY_WRITE, buf, len))
146 return -EFAULT; 150 return -EFAULT;
147 151
148 desc.written = 0; 152 return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
149 desc.arg.buf = buf; 153 buf, len, ppos);
150 desc.count = len;
151 desc.error = 0;
152
153 do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
154 ppos, &desc, file_read_actor);
155
156 if (desc.written)
157 return desc.written;
158 else
159 return desc.error;
160} 154}
161EXPORT_SYMBOL_GPL(xip_file_read); 155EXPORT_SYMBOL_GPL(xip_file_read);
162 156
@@ -211,13 +205,16 @@ __xip_unmap (struct address_space * mapping,
211 * 205 *
212 * This function is derived from filemap_fault, but used for execute in place 206 * This function is derived from filemap_fault, but used for execute in place
213 */ 207 */
214static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf) 208static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
215{ 209{
216 struct file *file = area->vm_file; 210 struct file *file = vma->vm_file;
217 struct address_space *mapping = file->f_mapping; 211 struct address_space *mapping = file->f_mapping;
218 struct inode *inode = mapping->host; 212 struct inode *inode = mapping->host;
219 struct page *page;
220 pgoff_t size; 213 pgoff_t size;
214 void *xip_mem;
215 unsigned long xip_pfn;
216 struct page *page;
217 int error;
221 218
222 /* XXX: are VM_FAULT_ codes OK? */ 219 /* XXX: are VM_FAULT_ codes OK? */
223 220
@@ -225,35 +222,44 @@ static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
225 if (vmf->pgoff >= size) 222 if (vmf->pgoff >= size)
226 return VM_FAULT_SIGBUS; 223 return VM_FAULT_SIGBUS;
227 224
228 page = mapping->a_ops->get_xip_page(mapping, 225 error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
229 vmf->pgoff*(PAGE_SIZE/512), 0); 226 &xip_mem, &xip_pfn);
230 if (!IS_ERR(page)) 227 if (likely(!error))
231 goto out; 228 goto found;
232 if (PTR_ERR(page) != -ENODATA) 229 if (error != -ENODATA)
233 return VM_FAULT_OOM; 230 return VM_FAULT_OOM;
234 231
235 /* sparse block */ 232 /* sparse block */
236 if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) && 233 if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
237 (area->vm_flags & (VM_SHARED| VM_MAYSHARE)) && 234 (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
238 (!(mapping->host->i_sb->s_flags & MS_RDONLY))) { 235 (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
236 int err;
237
239 /* maybe shared writable, allocate new block */ 238 /* maybe shared writable, allocate new block */
240 page = mapping->a_ops->get_xip_page(mapping, 239 error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
241 vmf->pgoff*(PAGE_SIZE/512), 1); 240 &xip_mem, &xip_pfn);
242 if (IS_ERR(page)) 241 if (error)
243 return VM_FAULT_SIGBUS; 242 return VM_FAULT_SIGBUS;
244 /* unmap page at pgoff from all other vmas */ 243 /* unmap sparse mappings at pgoff from all other vmas */
245 __xip_unmap(mapping, vmf->pgoff); 244 __xip_unmap(mapping, vmf->pgoff);
245
246found:
247 err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
248 xip_pfn);
249 if (err == -ENOMEM)
250 return VM_FAULT_OOM;
251 BUG_ON(err);
252 return VM_FAULT_NOPAGE;
246 } else { 253 } else {
247 /* not shared and writable, use xip_sparse_page() */ 254 /* not shared and writable, use xip_sparse_page() */
248 page = xip_sparse_page(); 255 page = xip_sparse_page();
249 if (!page) 256 if (!page)
250 return VM_FAULT_OOM; 257 return VM_FAULT_OOM;
251 }
252 258
253out: 259 page_cache_get(page);
254 page_cache_get(page); 260 vmf->page = page;
255 vmf->page = page; 261 return 0;
256 return 0; 262 }
257} 263}
258 264
259static struct vm_operations_struct xip_file_vm_ops = { 265static struct vm_operations_struct xip_file_vm_ops = {
@@ -262,11 +268,11 @@ static struct vm_operations_struct xip_file_vm_ops = {
262 268
263int xip_file_mmap(struct file * file, struct vm_area_struct * vma) 269int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
264{ 270{
265 BUG_ON(!file->f_mapping->a_ops->get_xip_page); 271 BUG_ON(!file->f_mapping->a_ops->get_xip_mem);
266 272
267 file_accessed(file); 273 file_accessed(file);
268 vma->vm_ops = &xip_file_vm_ops; 274 vma->vm_ops = &xip_file_vm_ops;
269 vma->vm_flags |= VM_CAN_NONLINEAR; 275 vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
270 return 0; 276 return 0;
271} 277}
272EXPORT_SYMBOL_GPL(xip_file_mmap); 278EXPORT_SYMBOL_GPL(xip_file_mmap);
@@ -279,17 +285,17 @@ __xip_file_write(struct file *filp, const char __user *buf,
279 const struct address_space_operations *a_ops = mapping->a_ops; 285 const struct address_space_operations *a_ops = mapping->a_ops;
280 struct inode *inode = mapping->host; 286 struct inode *inode = mapping->host;
281 long status = 0; 287 long status = 0;
282 struct page *page;
283 size_t bytes; 288 size_t bytes;
284 ssize_t written = 0; 289 ssize_t written = 0;
285 290
286 BUG_ON(!mapping->a_ops->get_xip_page); 291 BUG_ON(!mapping->a_ops->get_xip_mem);
287 292
288 do { 293 do {
289 unsigned long index; 294 unsigned long index;
290 unsigned long offset; 295 unsigned long offset;
291 size_t copied; 296 size_t copied;
292 char *kaddr; 297 void *xip_mem;
298 unsigned long xip_pfn;
293 299
294 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ 300 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
295 index = pos >> PAGE_CACHE_SHIFT; 301 index = pos >> PAGE_CACHE_SHIFT;
@@ -297,28 +303,22 @@ __xip_file_write(struct file *filp, const char __user *buf,
297 if (bytes > count) 303 if (bytes > count)
298 bytes = count; 304 bytes = count;
299 305
300 page = a_ops->get_xip_page(mapping, 306 status = a_ops->get_xip_mem(mapping, index, 0,
301 index*(PAGE_SIZE/512), 0); 307 &xip_mem, &xip_pfn);
302 if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) { 308 if (status == -ENODATA) {
303 /* we allocate a new page unmap it */ 309 /* we allocate a new page unmap it */
304 page = a_ops->get_xip_page(mapping, 310 status = a_ops->get_xip_mem(mapping, index, 1,
305 index*(PAGE_SIZE/512), 1); 311 &xip_mem, &xip_pfn);
306 if (!IS_ERR(page)) 312 if (!status)
307 /* unmap page at pgoff from all other vmas */ 313 /* unmap page at pgoff from all other vmas */
308 __xip_unmap(mapping, index); 314 __xip_unmap(mapping, index);
309 } 315 }
310 316
311 if (IS_ERR(page)) { 317 if (status)
312 status = PTR_ERR(page);
313 break; 318 break;
314 }
315 319
316 fault_in_pages_readable(buf, bytes);
317 kaddr = kmap_atomic(page, KM_USER0);
318 copied = bytes - 320 copied = bytes -
319 __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes); 321 __copy_from_user_nocache(xip_mem + offset, buf, bytes);
320 kunmap_atomic(kaddr, KM_USER0);
321 flush_dcache_page(page);
322 322
323 if (likely(copied > 0)) { 323 if (likely(copied > 0)) {
324 status = copied; 324 status = copied;
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(xip_file_write);
398 398
399/* 399/*
400 * truncate a page used for execute in place 400 * truncate a page used for execute in place
401 * functionality is analog to block_truncate_page but does use get_xip_page 401 * functionality is analog to block_truncate_page but does use get_xip_mem
402 * to get the page instead of page cache 402 * to get the page instead of page cache
403 */ 403 */
404int 404int
@@ -408,9 +408,11 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
408 unsigned offset = from & (PAGE_CACHE_SIZE-1); 408 unsigned offset = from & (PAGE_CACHE_SIZE-1);
409 unsigned blocksize; 409 unsigned blocksize;
410 unsigned length; 410 unsigned length;
411 struct page *page; 411 void *xip_mem;
412 unsigned long xip_pfn;
413 int err;
412 414
413 BUG_ON(!mapping->a_ops->get_xip_page); 415 BUG_ON(!mapping->a_ops->get_xip_mem);
414 416
415 blocksize = 1 << mapping->host->i_blkbits; 417 blocksize = 1 << mapping->host->i_blkbits;
416 length = offset & (blocksize - 1); 418 length = offset & (blocksize - 1);
@@ -421,18 +423,16 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
421 423
422 length = blocksize - length; 424 length = blocksize - length;
423 425
424 page = mapping->a_ops->get_xip_page(mapping, 426 err = mapping->a_ops->get_xip_mem(mapping, index, 0,
425 index*(PAGE_SIZE/512), 0); 427 &xip_mem, &xip_pfn);
426 if (!page) 428 if (unlikely(err)) {
427 return -ENOMEM; 429 if (err == -ENODATA)
428 if (unlikely(IS_ERR(page))) {
429 if (PTR_ERR(page) == -ENODATA)
430 /* Hole? No need to truncate */ 430 /* Hole? No need to truncate */
431 return 0; 431 return 0;
432 else 432 else
433 return PTR_ERR(page); 433 return err;
434 } 434 }
435 zero_user(page, offset, length); 435 memset(xip_mem + offset, 0, length);
436 return 0; 436 return 0;
437} 437}
438EXPORT_SYMBOL_GPL(xip_truncate_page); 438EXPORT_SYMBOL_GPL(xip_truncate_page);
diff --git a/mm/madvise.c b/mm/madvise.c
index 93ee375b38e..23a0ec3e0ea 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -112,7 +112,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
112 if (!file) 112 if (!file)
113 return -EBADF; 113 return -EBADF;
114 114
115 if (file->f_mapping->a_ops->get_xip_page) { 115 if (file->f_mapping->a_ops->get_xip_mem) {
116 /* no bad return value, but ignore advice */ 116 /* no bad return value, but ignore advice */
117 return 0; 117 return 0;
118 } 118 }