Diffstat (limited to 'mm/filemap.c')
 mm/filemap.c | 34 ++++++++++++++++------------------
 1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 72940fb38666..1cc5467cf36c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2473,6 +2473,21 @@ ssize_t generic_perform_write(struct file *file,
 						iov_iter_count(i));
 
 again:
+		/*
+		 * Bring in the user page that we will copy from _first_.
+		 * Otherwise there's a nasty deadlock on copying from the
+		 * same page as we're writing to, without it being marked
+		 * up-to-date.
+		 *
+		 * Not only is this an optimisation, but it is also required
+		 * to check that the address is actually valid, when atomic
+		 * usercopies are used, below.
+		 */
+		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+			status = -EFAULT;
+			break;
+		}
+
 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 						&page, &fsdata);
 		if (unlikely(status < 0))
@@ -2480,17 +2495,8 @@ again:
 
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
-		/*
-		 * 'page' is now locked. If we are trying to copy from a
-		 * mapping of 'page' in userspace, the copy might fault and
-		 * would need PageUptodate() to complete. But, page can not be
-		 * made Uptodate without acquiring the page lock, which we hold.
-		 * Deadlock. Avoid with pagefault_disable(). Fix up below with
-		 * iov_iter_fault_in_readable().
-		 */
-		pagefault_disable();
+
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-		pagefault_enable();
 		flush_dcache_page(page);
 
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
@@ -2513,14 +2519,6 @@ again:
 		 */
 		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
 						iov_iter_single_seg_count(i));
-		/*
-		 * This is the fallback to recover if the copy from
-		 * userspace above faults.
-		 */
-		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-			status = -EFAULT;
-			break;
-		}
 		goto again;
 	}
 	pos += copied;
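
For reference, the net effect of this change on the copy loop is sketched below: the iov_iter_fault_in_readable() prefault moves in front of write_begin(), which is what locks the destination page, and the pagefault_disable()/pagefault_enable() pair around the atomic usercopy goes away. This is a simplified reconstruction of the post-patch loop from the hunks above, not the complete function; variable declarations, write_end() result handling, and the enclosing do/while bookkeeping are elided.

again:
	/*
	 * Prefault the source user page before write_begin() locks the
	 * destination pagecache page; faulting later, inside the atomic
	 * copy, could deadlock on that page lock.
	 */
	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
		status = -EFAULT;
		break;
	}

	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
					&page, &fsdata);
	if (unlikely(status < 0))
		break;

	if (mapping_writably_mapped(mapping))
		flush_dcache_page(page);

	/* No explicit pagefault_disable()/pagefault_enable() needed here. */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	flush_dcache_page(page);

	status = a_ops->write_end(file, mapping, pos, bytes, copied,
					page, fsdata);
	/* ... write_end() result handling elided ... */

	if (unlikely(copied == 0)) {
		/*
		 * The atomic copy can still copy nothing (e.g. the
		 * prefaulted page was reclaimed in the meantime); fall
		 * back to a single-segment length and retry so the loop
		 * cannot livelock.
		 */
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
					iov_iter_single_seg_count(i));
		goto again;
	}
	pos += copied;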