Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c  242
-rw-r--r--  mm/mmap.c       3
-rw-r--r--  mm/nommu.c      3
-rw-r--r--  mm/shmem.c      8
-rw-r--r--  mm/vmalloc.c    3
5 files changed, 14 insertions, 245 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index ab8553658af3..f3e5f8944d17 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2029,48 +2029,8 @@ int pagecache_write_begin(struct file *file, struct address_space *mapping,
 {
         const struct address_space_operations *aops = mapping->a_ops;
 
-        if (aops->write_begin) {
-                return aops->write_begin(file, mapping, pos, len, flags,
+        return aops->write_begin(file, mapping, pos, len, flags,
                                                         pagep, fsdata);
-        } else {
-                int ret;
-                pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-                unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
-                struct inode *inode = mapping->host;
-                struct page *page;
-again:
-                page = __grab_cache_page(mapping, index);
-                *pagep = page;
-                if (!page)
-                        return -ENOMEM;
-
-                if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
-                        /*
-                         * There is no way to resolve a short write situation
-                         * for a !Uptodate page (except by double copying in
-                         * the caller done by generic_perform_write_2copy).
-                         *
-                         * Instead, we have to bring it uptodate here.
-                         */
-                        ret = aops->readpage(file, page);
-                        page_cache_release(page);
-                        if (ret) {
-                                if (ret == AOP_TRUNCATED_PAGE)
-                                        goto again;
-                                return ret;
-                        }
-                        goto again;
-                }
-
-                ret = aops->prepare_write(file, page, offset, offset+len);
-                if (ret) {
-                        unlock_page(page);
-                        page_cache_release(page);
-                        if (pos + len > inode->i_size)
-                                vmtruncate(inode, inode->i_size);
-                }
-                return ret;
-        }
 }
 EXPORT_SYMBOL(pagecache_write_begin);
 
@@ -2079,32 +2039,9 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
                         struct page *page, void *fsdata)
 {
         const struct address_space_operations *aops = mapping->a_ops;
-        int ret;
-
-        if (aops->write_end) {
-                mark_page_accessed(page);
-                ret = aops->write_end(file, mapping, pos, len, copied,
-                                                        page, fsdata);
-        } else {
-                unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
-                struct inode *inode = mapping->host;
-
-                flush_dcache_page(page);
-                ret = aops->commit_write(file, page, offset, offset+len);
-                unlock_page(page);
-                mark_page_accessed(page);
-                page_cache_release(page);
-
-                if (ret < 0) {
-                        if (pos + len > inode->i_size)
-                                vmtruncate(inode, inode->i_size);
-                } else if (ret > 0)
-                        ret = min_t(size_t, copied, ret);
-                else
-                        ret = copied;
-        }
 
-        return ret;
+        mark_page_accessed(page);
+        return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
 }
 EXPORT_SYMBOL(pagecache_write_end);
 
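With the prepare_write()/commit_write() fallbacks gone, both helpers now call straight through to the aops, so any filesystem using the generic write path must supply ->write_begin/->write_end. For reference, a minimal sketch of such a pair for a simple block-based filesystem, assuming the block_write_begin()/generic_write_end() helper signatures of this era; the myfs_* names and the get_block callback are hypothetical:

/* Sketch only: myfs_get_block is a hypothetical get_block_t callback;
 * block_write_begin() and generic_write_end() are the fs/buffer.c
 * helpers as they existed at this point. */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        *pagep = NULL;  /* let block_write_begin() grab the pagecache page */
        return block_write_begin(file, mapping, pos, len, flags,
                                 pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
        .write_begin    = myfs_write_begin,
        .write_end      = generic_write_end,    /* commits, unlocks, releases */
        /* .readpage, .writepage, ... */
};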
@@ -2226,174 +2163,6 @@ repeat:
 }
 EXPORT_SYMBOL(__grab_cache_page);
 
-static ssize_t generic_perform_write_2copy(struct file *file,
-                struct iov_iter *i, loff_t pos)
-{
-        struct address_space *mapping = file->f_mapping;
-        const struct address_space_operations *a_ops = mapping->a_ops;
-        struct inode *inode = mapping->host;
-        long status = 0;
-        ssize_t written = 0;
-
-        do {
-                struct page *src_page;
-                struct page *page;
-                pgoff_t index;          /* Pagecache index for current page */
-                unsigned long offset;   /* Offset into pagecache page */
-                unsigned long bytes;    /* Bytes to write to page */
-                size_t copied;          /* Bytes copied from user */
-
-                offset = (pos & (PAGE_CACHE_SIZE - 1));
-                index = pos >> PAGE_CACHE_SHIFT;
-                bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
-                                                iov_iter_count(i));
-
-                /*
-                 * a non-NULL src_page indicates that we're doing the
-                 * copy via get_user_pages and kmap.
-                 */
-                src_page = NULL;
-
-                /*
-                 * Bring in the user page that we will copy from _first_.
-                 * Otherwise there's a nasty deadlock on copying from the
-                 * same page as we're writing to, without it being marked
-                 * up-to-date.
-                 *
-                 * Not only is this an optimisation, but it is also required
-                 * to check that the address is actually valid, when atomic
-                 * usercopies are used, below.
-                 */
-                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-                        status = -EFAULT;
-                        break;
-                }
-
-                page = __grab_cache_page(mapping, index);
-                if (!page) {
-                        status = -ENOMEM;
-                        break;
-                }
-
-                /*
-                 * non-uptodate pages cannot cope with short copies, and we
-                 * cannot take a pagefault with the destination page locked.
-                 * So pin the source page to copy it.
-                 */
-                if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
-                        unlock_page(page);
-
-                        src_page = alloc_page(GFP_KERNEL);
-                        if (!src_page) {
-                                page_cache_release(page);
-                                status = -ENOMEM;
-                                break;
-                        }
-
-                        /*
-                         * Cannot get_user_pages with a page locked for the
-                         * same reason as we can't take a page fault with a
-                         * page locked (as explained below).
-                         */
-                        copied = iov_iter_copy_from_user(src_page, i,
-                                                        offset, bytes);
-                        if (unlikely(copied == 0)) {
-                                status = -EFAULT;
-                                page_cache_release(page);
-                                page_cache_release(src_page);
-                                break;
-                        }
-                        bytes = copied;
-
-                        lock_page(page);
-                        /*
-                         * Can't handle the page going uptodate here, because
-                         * that means we would use non-atomic usercopies, which
-                         * zero out the tail of the page, which can cause
-                         * zeroes to become transiently visible. We could just
-                         * use a non-zeroing copy, but the APIs aren't too
-                         * consistent.
-                         */
-                        if (unlikely(!page->mapping || PageUptodate(page))) {
-                                unlock_page(page);
-                                page_cache_release(page);
-                                page_cache_release(src_page);
-                                continue;
-                        }
-                }
-
-                status = a_ops->prepare_write(file, page, offset, offset+bytes);
-                if (unlikely(status))
-                        goto fs_write_aop_error;
-
-                if (!src_page) {
-                        /*
-                         * Must not enter the pagefault handler here, because
-                         * we hold the page lock, so we might recursively
-                         * deadlock on the same lock, or get an ABBA deadlock
-                         * against a different lock, or against the mmap_sem
-                         * (which nests outside the page lock). So increment
-                         * preempt count, and use _atomic usercopies.
-                         *
-                         * The page is uptodate so we are OK to encounter a
-                         * short copy: if unmodified parts of the page are
-                         * marked dirty and written out to disk, it doesn't
-                         * really matter.
-                         */
-                        pagefault_disable();
-                        copied = iov_iter_copy_from_user_atomic(page, i,
-                                                        offset, bytes);
-                        pagefault_enable();
-                } else {
-                        void *src, *dst;
-                        src = kmap_atomic(src_page, KM_USER0);
-                        dst = kmap_atomic(page, KM_USER1);
-                        memcpy(dst + offset, src + offset, bytes);
-                        kunmap_atomic(dst, KM_USER1);
-                        kunmap_atomic(src, KM_USER0);
-                        copied = bytes;
-                }
-                flush_dcache_page(page);
-
-                status = a_ops->commit_write(file, page, offset, offset+bytes);
-                if (unlikely(status < 0))
-                        goto fs_write_aop_error;
-                if (unlikely(status > 0)) /* filesystem did partial write */
-                        copied = min_t(size_t, copied, status);
-
-                unlock_page(page);
-                mark_page_accessed(page);
-                page_cache_release(page);
-                if (src_page)
-                        page_cache_release(src_page);
-
-                iov_iter_advance(i, copied);
-                pos += copied;
-                written += copied;
-
-                balance_dirty_pages_ratelimited(mapping);
-                cond_resched();
-                continue;
-
-fs_write_aop_error:
-                unlock_page(page);
-                page_cache_release(page);
-                if (src_page)
-                        page_cache_release(src_page);
-
-                /*
-                 * prepare_write() may have instantiated a few blocks
-                 * outside i_size. Trim these off again. Don't need
-                 * i_size_read because we hold i_mutex.
-                 */
-                if (pos + bytes > inode->i_size)
-                        vmtruncate(inode, inode->i_size);
-                break;
-        } while (iov_iter_count(i));
-
-        return written ? written : status;
-}
-
 static ssize_t generic_perform_write(struct file *file,
                 struct iov_iter *i, loff_t pos)
 {
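Everything the 2copy path did by hand (the bounce page to avoid deadlocking on a locked destination page, the non-uptodate short-copy dance) is absorbed by the write_begin/write_end protocol. For orientation, a condensed sketch of the surviving generic_perform_write() loop; this is not the verbatim kernel code, and the zero-length-copy retry and AOP_FLAG_UNINTERRUPTIBLE handling are omitted:

/* Condensed sketch of the remaining single write path; declarations of
 * page, fsdata, offset, bytes, copied, status and written live elsewhere
 * in the function. The page comes back locked from ->write_begin(), so
 * only an atomic usercopy is legal until ->write_end() unlocks it. */
do {
        offset = pos & (PAGE_CACHE_SIZE - 1);
        bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
                        iov_iter_count(i));

        /* pre-fault the source page so the atomic copy below can succeed */
        if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
                status = -EFAULT;
                break;
        }

        status = a_ops->write_begin(file, mapping, pos, bytes, flags,
                                        &page, &fsdata);
        if (unlikely(status))
                break;

        pagefault_disable();    /* no pagefaults with the page lock held */
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        pagefault_enable();
        flush_dcache_page(page);

        status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                        page, fsdata);
        if (unlikely(status < 0))
                break;
        copied = status;        /* ->write_end() returns bytes committed */

        iov_iter_advance(i, copied);
        pos += copied;
        written += copied;

        balance_dirty_pages_ratelimited(mapping);
        cond_resched();
} while (iov_iter_count(i));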
@@ -2494,10 +2263,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
         struct iov_iter i;
 
         iov_iter_init(&i, iov, nr_segs, count, written);
-        if (a_ops->write_begin)
-                status = generic_perform_write(file, &i, pos);
-        else
-                status = generic_perform_write_2copy(file, &i, pos);
+        status = generic_perform_write(file, &i, pos);
 
         if (likely(status >= 0)) {
                 written += status;
diff --git a/mm/mmap.c b/mm/mmap.c
index 74f4d158022e..de14ac21e5b5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -175,7 +175,8 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 
         /* Don't let a single process grow too big:
            leave 3% of the size of this process for other processes */
-        allowed -= mm->total_vm / 32;
+        if (mm)
+                allowed -= mm->total_vm / 32;
 
         /*
          * cast `allowed' as a signed long because vm_committed_space
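The unconditional subtraction oopsed once __vm_enough_memory() could be entered from a kernel thread, where mm is NULL (for example knfsd instantiating a shmem object; see the mm/shmem.c hunks below). A short illustration of the fixed logic, assuming the default OVERCOMMIT_GUESS policy; not verbatim kernel code:

/* Sketch: for a task with total_vm = 65536 pages, 65536 / 32 = 2048
 * pages (~3%) stay reserved for other processes. A kernel thread has
 * no mm, so there is no process-size reservation to apply. */
if (mm)
        allowed -= mm->total_vm / 32;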
diff --git a/mm/nommu.c b/mm/nommu.c
index 2696b24f2bb3..7695dc850785 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1454,7 +1454,8 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 
         /* Don't let a single process grow too big:
            leave 3% of the size of this process for other processes */
-        allowed -= current->mm->total_vm / 32;
+        if (mm)
+                allowed -= mm->total_vm / 32;
 
         /*
          * cast `allowed' as a signed long because vm_committed_space
diff --git a/mm/shmem.c b/mm/shmem.c
index d38d7e61fcd0..0ed075215e5f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -161,8 +161,8 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
  */
 static inline int shmem_acct_size(unsigned long flags, loff_t size)
 {
-        return (flags & VM_ACCOUNT)?
-                security_vm_enough_memory(VM_ACCT(size)): 0;
+        return (flags & VM_ACCOUNT) ?
+                security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
 }
 
 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
@@ -179,8 +179,8 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
  */
 static inline int shmem_acct_block(unsigned long flags)
 {
-        return (flags & VM_ACCOUNT)?
-                0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
+        return (flags & VM_ACCOUNT) ?
+                0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
 }
 
 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
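shmem accounting can run from kernel threads with no current->mm, hence the switch to the _kern variant, which feeds a possibly-NULL mm into the now NULL-safe __vm_enough_memory() from the mm/mmap.c hunk above. A plausible shape for the two wrappers in the capabilities-only build; this is a sketch under that assumption, not the verbatim security-layer code:

/* Sketch, not the verbatim include/linux/security.h bodies. */
static inline int security_vm_enough_memory(long pages)
{
        WARN_ON(current->mm == NULL);   /* userspace callers only */
        return cap_vm_enough_memory(current->mm, pages);
}

static inline int security_vm_enough_memory_kern(long pages)
{
        /* current->mm may be NULL here (kernel thread); that is
         * tolerated now that __vm_enough_memory() checks for it */
        return cap_vm_enough_memory(current->mm, pages);
}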
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 036536945dd9..f1cc03bbf6ac 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -897,7 +897,8 @@ EXPORT_SYMBOL(vm_unmap_ram);
  * @count: number of pages
  * @node: prefer to allocate data structures on this node
  * @prot: memory protection to use. PAGE_KERNEL for regular RAM
- * @returns: a pointer to the address that has been mapped, or NULL on failure
+ *
+ * Returns: a pointer to the address that has been mapped, or %NULL on failure
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
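Beyond the kernel-doc fix, a brief usage sketch of the API being documented, with error handling per the corrected Returns: line; the pages array and count are assumed to have been set up by the caller:

/* Sketch: map count pre-allocated pages into one contiguous kernel
 * virtual range; node == -1 means no NUMA placement preference. */
void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
if (!addr)
        return -ENOMEM;         /* the NULL failure case now documented */

/* ... use the mapping ... */

vm_unmap_ram(addr, count);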