author	Nick Piggin <npiggin@suse.de>	2007-07-19 04:46:59 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 13:04:41 -0400
commit	54cb8821de07f2ffcd28c380ce9b93d5784b40d7 (patch)
tree	1de676534963d96af42863b20191bc9f80060dea /mm/filemap_xip.c
parent	d00806b183152af6d24f46f0c33f14162ca1262a (diff)
mm: merge populate and nopage into fault (fixes nonlinear)
Nonlinear mappings are (AFAIKS) simply a virtual memory concept that encodes the virtual address -> file offset differently from linear mappings.

->populate is a layering violation because the filesystem/pagecache code should not need to know anything about the virtual memory mapping. The hitch here is that the ->nopage handler didn't pass down enough information (ie. pgoff). But it is more logical to pass pgoff rather than have the ->nopage function calculate it itself anyway (because that's a similar layering violation). Having the populate handler install the pte itself is likewise a nasty thing to be doing.

This patch introduces a new fault handler that replaces ->nopage and ->populate and (later) ->nopfn. Most of the old mechanism is still in place, so there is a lot of duplication, and nice cleanups become possible once everyone switches over.

The rationale for doing this in the first place is that nonlinear mappings are subject to the pagefault vs invalidate/truncate race too, and it seemed stupid to duplicate the synchronisation logic rather than just consolidate the two.

After this patch, MAP_NONBLOCK no longer sets up ptes for pages present in pagecache. Seems like a fringe functionality anyway.

NOPAGE_REFAULT is removed. This should be implemented with ->fault, and no users have hit mainline yet.

[akpm@linux-foundation.org: cleanup]
[randy.dunlap@oracle.com: doc. fixes for readahead]
[akpm@linux-foundation.org: build fix]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
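For reference, the shape of the replacement interface as it appears in this patch: the handler receives the precomputed pgoff in a fault descriptor, reports its outcome through the descriptor's type field, and returns the page (or NULL on error). Below is a minimal sketch of a converted handler, not code from this patch: example_fault and example_find_page are hypothetical names, and only the pgoff and type members of struct fault_data are visible in the hunk further down, so anything else about that structure is assumed from the rest of the series.

	/*
	 * Sketch of a minimal ->fault handler under the transitional API
	 * this patch introduces.  example_find_page() is a hypothetical
	 * lookup helper, not a real kernel function.
	 */
	static struct page *example_fault(struct vm_area_struct *vma,
					  struct fault_data *fdata)
	{
		struct page *page;

		/* pgoff arrives precomputed; no more address arithmetic
		 * as in the old ->nopage handlers. */
		page = example_find_page(vma->vm_file->f_mapping, fdata->pgoff);
		if (!page) {
			fdata->type = VM_FAULT_SIGBUS;
			return NULL;
		}

		fdata->type = VM_FAULT_MINOR;	/* satisfied without blocking I/O */
		page_cache_get(page);		/* caller expects a held reference */
		return page;
	}

	static struct vm_operations_struct example_vm_ops = {
		.fault = example_fault,
	};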
Diffstat (limited to 'mm/filemap_xip.c')
-rw-r--r--	mm/filemap_xip.c	54
1 files changed, 30 insertions(+), 24 deletions(-)
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 65ffc321f0c0..82f4b8e9834e 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -205,62 +205,67 @@ __xip_unmap (struct address_space * mapping,
 }
 
 /*
- * xip_nopage() is invoked via the vma operations vector for a
+ * xip_fault() is invoked via the vma operations vector for a
  * mapped memory region to read in file data during a page fault.
  *
- * This function is derived from filemap_nopage, but used for execute in place
+ * This function is derived from filemap_fault, but used for execute in place
  */
-static struct page *
-xip_file_nopage(struct vm_area_struct * area,
-		unsigned long address,
-		int *type)
+static struct page *xip_file_fault(struct vm_area_struct *area,
+					struct fault_data *fdata)
 {
 	struct file *file = area->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	struct page *page;
-	unsigned long size, pgoff, endoff;
+	pgoff_t size;
 
-	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
-		+ area->vm_pgoff;
-	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
-		+ area->vm_pgoff;
+	/* XXX: are VM_FAULT_ codes OK? */
 
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	if (pgoff >= size)
-		return NOPAGE_SIGBUS;
+	if (fdata->pgoff >= size) {
+		fdata->type = VM_FAULT_SIGBUS;
+		return NULL;
+	}
 
-	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
+	page = mapping->a_ops->get_xip_page(mapping,
+					fdata->pgoff*(PAGE_SIZE/512), 0);
 	if (!IS_ERR(page))
 		goto out;
-	if (PTR_ERR(page) != -ENODATA)
-		return NOPAGE_SIGBUS;
+	if (PTR_ERR(page) != -ENODATA) {
+		fdata->type = VM_FAULT_OOM;
+		return NULL;
+	}
 
 	/* sparse block */
 	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
 	    (area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
 	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
 		/* maybe shared writable, allocate new block */
-		page = mapping->a_ops->get_xip_page (mapping,
-			pgoff*(PAGE_SIZE/512), 1);
-		if (IS_ERR(page))
-			return NOPAGE_SIGBUS;
+		page = mapping->a_ops->get_xip_page(mapping,
+					fdata->pgoff*(PAGE_SIZE/512), 1);
+		if (IS_ERR(page)) {
+			fdata->type = VM_FAULT_SIGBUS;
+			return NULL;
+		}
 		/* unmap page at pgoff from all other vmas */
-		__xip_unmap(mapping, pgoff);
+		__xip_unmap(mapping, fdata->pgoff);
 	} else {
 		/* not shared and writable, use xip_sparse_page() */
 		page = xip_sparse_page();
-		if (!page)
-			return NOPAGE_OOM;
+		if (!page) {
+			fdata->type = VM_FAULT_OOM;
+			return NULL;
+		}
 	}
 
 out:
+	fdata->type = VM_FAULT_MINOR;
 	page_cache_get(page);
 	return page;
 }
 
 static struct vm_operations_struct xip_file_vm_ops = {
-	.nopage	= xip_file_nopage,
+	.fault	= xip_file_fault,
 };
 
 int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -269,6 +274,7 @@ int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
 
 	file_accessed(file);
 	vma->vm_ops = &xip_file_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_file_mmap);