diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-01-16 11:46:22 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-16 11:46:22 -0500 |
commit | 5a2dd72abdae75ea2960145e0549635ce4e0be96 (patch) | |
tree | 44dba0119c75679a17215200f92ab23bdde9efc2 /mm/filemap.c | |
parent | efdc64f0c792ea744bcc9203f35b908e66d42f41 (diff) | |
parent | 7cb36b6ccdca03bd87e8faca7fd920643dd1aec7 (diff) |
Merge branch 'linus' into irq/genirq
Diffstat (limited to 'mm/filemap.c')
-rw-r--r-- | mm/filemap.c | 56 |
1 files changed, 34 insertions, 22 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index f3e5f8944d17..23acefe51808 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -210,7 +210,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, | |||
210 | int ret; | 210 | int ret; |
211 | struct writeback_control wbc = { | 211 | struct writeback_control wbc = { |
212 | .sync_mode = sync_mode, | 212 | .sync_mode = sync_mode, |
213 | .nr_to_write = mapping->nrpages * 2, | 213 | .nr_to_write = LONG_MAX, |
214 | .range_start = start, | 214 | .range_start = start, |
215 | .range_end = end, | 215 | .range_end = end, |
216 | }; | 216 | }; |
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping, | |||
460 | VM_BUG_ON(!PageLocked(page)); | 460 | VM_BUG_ON(!PageLocked(page)); |
461 | 461 | ||
462 | error = mem_cgroup_cache_charge(page, current->mm, | 462 | error = mem_cgroup_cache_charge(page, current->mm, |
463 | gfp_mask & ~__GFP_HIGHMEM); | 463 | gfp_mask & GFP_RECLAIM_MASK); |
464 | if (error) | 464 | if (error) |
465 | goto out; | 465 | goto out; |
466 | 466 | ||
@@ -741,7 +741,14 @@ repeat: | |||
741 | page = __page_cache_alloc(gfp_mask); | 741 | page = __page_cache_alloc(gfp_mask); |
742 | if (!page) | 742 | if (!page) |
743 | return NULL; | 743 | return NULL; |
744 | err = add_to_page_cache_lru(page, mapping, index, gfp_mask); | 744 | /* |
745 | * We want a regular kernel memory (not highmem or DMA etc) | ||
746 | * allocation for the radix tree nodes, but we need to honour | ||
747 | * the context-specific requirements the caller has asked for. | ||
748 | * GFP_RECLAIM_MASK collects those requirements. | ||
749 | */ | ||
750 | err = add_to_page_cache_lru(page, mapping, index, | ||
751 | (gfp_mask & GFP_RECLAIM_MASK)); | ||
745 | if (unlikely(err)) { | 752 | if (unlikely(err)) { |
746 | page_cache_release(page); | 753 | page_cache_release(page); |
747 | page = NULL; | 754 | page = NULL; |
@@ -950,7 +957,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) | |||
950 | return NULL; | 957 | return NULL; |
951 | } | 958 | } |
952 | page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS); | 959 | page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS); |
953 | if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) { | 960 | if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) { |
954 | page_cache_release(page); | 961 | page_cache_release(page); |
955 | page = NULL; | 962 | page = NULL; |
956 | } | 963 | } |
@@ -1317,7 +1324,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, | |||
1317 | goto out; /* skip atime */ | 1324 | goto out; /* skip atime */ |
1318 | size = i_size_read(inode); | 1325 | size = i_size_read(inode); |
1319 | if (pos < size) { | 1326 | if (pos < size) { |
1320 | retval = filemap_write_and_wait(mapping); | 1327 | retval = filemap_write_and_wait_range(mapping, pos, |
1328 | pos + iov_length(iov, nr_segs) - 1); | ||
1321 | if (!retval) { | 1329 | if (!retval) { |
1322 | retval = mapping->a_ops->direct_IO(READ, iocb, | 1330 | retval = mapping->a_ops->direct_IO(READ, iocb, |
1323 | iov, pos, nr_segs); | 1331 | iov, pos, nr_segs); |
@@ -1366,7 +1374,7 @@ do_readahead(struct address_space *mapping, struct file *filp, | |||
1366 | return 0; | 1374 | return 0; |
1367 | } | 1375 | } |
1368 | 1376 | ||
1369 | asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count) | 1377 | SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count) |
1370 | { | 1378 | { |
1371 | ssize_t ret; | 1379 | ssize_t ret; |
1372 | struct file *file; | 1380 | struct file *file; |
@@ -1385,6 +1393,13 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count) | |||
1385 | } | 1393 | } |
1386 | return ret; | 1394 | return ret; |
1387 | } | 1395 | } |
1396 | #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS | ||
1397 | asmlinkage long SyS_readahead(long fd, loff_t offset, long count) | ||
1398 | { | ||
1399 | return SYSC_readahead((int) fd, offset, (size_t) count); | ||
1400 | } | ||
1401 | SYSCALL_ALIAS(sys_readahead, SyS_readahead); | ||
1402 | #endif | ||
1388 | 1403 | ||
1389 | #ifdef CONFIG_MMU | 1404 | #ifdef CONFIG_MMU |
1390 | /** | 1405 | /** |
@@ -1530,7 +1545,6 @@ retry_find: | |||
1530 | /* | 1545 | /* |
1531 | * Found the page and have a reference on it. | 1546 | * Found the page and have a reference on it. |
1532 | */ | 1547 | */ |
1533 | mark_page_accessed(page); | ||
1534 | ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT; | 1548 | ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT; |
1535 | vmf->page = page; | 1549 | vmf->page = page; |
1536 | return ret | VM_FAULT_LOCKED; | 1550 | return ret | VM_FAULT_LOCKED; |
@@ -1766,7 +1780,7 @@ int should_remove_suid(struct dentry *dentry) | |||
1766 | if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) | 1780 | if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) |
1767 | kill |= ATTR_KILL_SGID; | 1781 | kill |= ATTR_KILL_SGID; |
1768 | 1782 | ||
1769 | if (unlikely(kill && !capable(CAP_FSETID))) | 1783 | if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) |
1770 | return kill; | 1784 | return kill; |
1771 | 1785 | ||
1772 | return 0; | 1786 | return 0; |
@@ -2060,18 +2074,10 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, | |||
2060 | if (count != ocount) | 2074 | if (count != ocount) |
2061 | *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); | 2075 | *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); |
2062 | 2076 | ||
2063 | /* | ||
2064 | * Unmap all mmappings of the file up-front. | ||
2065 | * | ||
2066 | * This will cause any pte dirty bits to be propagated into the | ||
2067 | * pageframes for the subsequent filemap_write_and_wait(). | ||
2068 | */ | ||
2069 | write_len = iov_length(iov, *nr_segs); | 2077 | write_len = iov_length(iov, *nr_segs); |
2070 | end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; | 2078 | end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; |
2071 | if (mapping_mapped(mapping)) | ||
2072 | unmap_mapping_range(mapping, pos, write_len, 0); | ||
2073 | 2079 | ||
2074 | written = filemap_write_and_wait(mapping); | 2080 | written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); |
2075 | if (written) | 2081 | if (written) |
2076 | goto out; | 2082 | goto out; |
2077 | 2083 | ||
@@ -2140,19 +2146,24 @@ EXPORT_SYMBOL(generic_file_direct_write); | |||
2140 | * Find or create a page at the given pagecache position. Return the locked | 2146 | * Find or create a page at the given pagecache position. Return the locked |
2141 | * page. This function is specifically for buffered writes. | 2147 | * page. This function is specifically for buffered writes. |
2142 | */ | 2148 | */ |
2143 | struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index) | 2149 | struct page *grab_cache_page_write_begin(struct address_space *mapping, |
2150 | pgoff_t index, unsigned flags) | ||
2144 | { | 2151 | { |
2145 | int status; | 2152 | int status; |
2146 | struct page *page; | 2153 | struct page *page; |
2154 | gfp_t gfp_notmask = 0; | ||
2155 | if (flags & AOP_FLAG_NOFS) | ||
2156 | gfp_notmask = __GFP_FS; | ||
2147 | repeat: | 2157 | repeat: |
2148 | page = find_lock_page(mapping, index); | 2158 | page = find_lock_page(mapping, index); |
2149 | if (likely(page)) | 2159 | if (likely(page)) |
2150 | return page; | 2160 | return page; |
2151 | 2161 | ||
2152 | page = page_cache_alloc(mapping); | 2162 | page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask); |
2153 | if (!page) | 2163 | if (!page) |
2154 | return NULL; | 2164 | return NULL; |
2155 | status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); | 2165 | status = add_to_page_cache_lru(page, mapping, index, |
2166 | GFP_KERNEL & ~gfp_notmask); | ||
2156 | if (unlikely(status)) { | 2167 | if (unlikely(status)) { |
2157 | page_cache_release(page); | 2168 | page_cache_release(page); |
2158 | if (status == -EEXIST) | 2169 | if (status == -EEXIST) |
@@ -2161,7 +2172,7 @@ repeat: | |||
2161 | } | 2172 | } |
2162 | return page; | 2173 | return page; |
2163 | } | 2174 | } |
2164 | EXPORT_SYMBOL(__grab_cache_page); | 2175 | EXPORT_SYMBOL(grab_cache_page_write_begin); |
2165 | 2176 | ||
2166 | static ssize_t generic_perform_write(struct file *file, | 2177 | static ssize_t generic_perform_write(struct file *file, |
2167 | struct iov_iter *i, loff_t pos) | 2178 | struct iov_iter *i, loff_t pos) |
@@ -2286,7 +2297,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, | |||
2286 | * the file data here, to try to honour O_DIRECT expectations. | 2297 | * the file data here, to try to honour O_DIRECT expectations. |
2287 | */ | 2298 | */ |
2288 | if (unlikely(file->f_flags & O_DIRECT) && written) | 2299 | if (unlikely(file->f_flags & O_DIRECT) && written) |
2289 | status = filemap_write_and_wait(mapping); | 2300 | status = filemap_write_and_wait_range(mapping, |
2301 | pos, pos + written - 1); | ||
2290 | 2302 | ||
2291 | return written ? written : status; | 2303 | return written ? written : status; |
2292 | } | 2304 | } |