 fs/ntfs/file.c             | 26 ++++++++++++++------------
 include/asm-i386/uaccess.h |  6 ++++++
 mm/filemap.c               |  8 ++------
 mm/filemap.h               | 26 ++++++++++++++++++--------
 4 files changed, 40 insertions(+), 26 deletions(-)
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 88292f9e4b9b..2e42c2dcae12 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1358,7 +1358,7 @@ err_out:
         goto out;
 }
 
-static size_t __ntfs_copy_from_user_iovec(char *vaddr,
+static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
                 const struct iovec *iov, size_t iov_ofs, size_t bytes)
 {
         size_t total = 0;
@@ -1376,10 +1376,6 @@ static size_t __ntfs_copy_from_user_iovec(char *vaddr,
                 bytes -= len;
                 vaddr += len;
                 if (unlikely(left)) {
-                        /*
-                         * Zero the rest of the target like __copy_from_user().
-                         */
-                        memset(vaddr, 0, bytes);
                         total -= left;
                         break;
                 }
@@ -1420,11 +1416,13 @@ static inline void ntfs_set_next_iovec(const struct iovec **iovp,
  * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
  * single-segment behaviour.
  *
- * We call the same helper (__ntfs_copy_from_user_iovec()) both when atomic and
- * when not atomic.  This is ok because __ntfs_copy_from_user_iovec() calls
- * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
- * fact, the only difference between __copy_from_user_inatomic() and
- * __copy_from_user() is that the latter calls might_sleep().  And on many
+ * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
+ * when atomic and when not atomic.  This is ok because
+ * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
+ * and it is ok to call this when non-atomic.
+ * In fact, the only difference between __copy_from_user_inatomic() and
+ * __copy_from_user() is that the latter calls might_sleep() and the former
+ * should not zero the tail of the buffer on error.  And on many
  * architectures __copy_from_user_inatomic() is just defined to
  * __copy_from_user() so it makes no difference at all on those architectures.
  */
@@ -1441,14 +1439,18 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
                 if (len > bytes)
                         len = bytes;
                 kaddr = kmap_atomic(*pages, KM_USER0);
-                copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
+                copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
                                 *iov, *iov_ofs, len);
                 kunmap_atomic(kaddr, KM_USER0);
                 if (unlikely(copied != len)) {
                         /* Do it the slow way. */
                         kaddr = kmap(*pages);
-                        copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
+                        copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
                                         *iov, *iov_ofs, len);
+                        /*
+                         * Zero the rest of the target like __copy_from_user().
+                         */
+                        memset(kaddr + ofs + copied, 0, len - copied);
                         kunmap(*pages);
                         if (unlikely(copied != len))
                                 goto err_out;
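
Taken together, the fs/ntfs/file.c hunks converge on one pattern: attempt the
copy under kmap_atomic() first, and zero the uncopied tail only in the sleeping
kmap() fallback, once the copy result is final.  A condensed sketch of that
shape, assuming kernel context (the wrapper name copy_iovec_into_page() is
hypothetical; the helper is the one renamed above):

static size_t copy_iovec_into_page(struct page *page, size_t ofs,
                const struct iovec *iov, size_t iov_ofs, size_t len)
{
        char *kaddr;
        size_t copied;

        /* Fast path: atomic mapping, user faults cannot be serviced here. */
        kaddr = kmap_atomic(page, KM_USER0);
        copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
                        iov, iov_ofs, len);
        kunmap_atomic(kaddr, KM_USER0);
        if (unlikely(copied != len)) {
                /* Slow path: kmap() may sleep, so faults can be handled. */
                kaddr = kmap(page);
                copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
                                iov, iov_ofs, len);
                /* Zero the tail only after the copy result is final. */
                memset(kaddr + ofs + copied, 0, len - copied);
                kunmap(page);
        }
        return copied;
}

Zeroing only in the fallback is what keeps a concurrent reader from ever
observing the all-zero window described in the mm/filemap.h comment below.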
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 8462f8e0e658..d0d253277be5 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -458,6 +458,12 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  *
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
+ *
+ * An alternate version - __copy_from_user_inatomic() - may be called from
+ * atomic context and will fail rather than sleep.  In this case the
+ * uncopied bytes will *NOT* be padded with zeros.  See mm/filemap.h
+ * for an explanation of why this is needed.
+ * FIXME this isn't implemented yet EMXIF
  */
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
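
The new comment pins down the intended contract: __copy_from_user() behaves
like __copy_from_user_inatomic() plus a might_sleep() check plus zero-padding
of whatever could not be copied.  A sketch of that relationship, for
illustration only (my_copy_from_user() is a hypothetical name, not the real
i386 implementation, and per the FIXME above the no-padding behaviour of the
inatomic variant is not implemented yet):

static __always_inline unsigned long
my_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long left;

        might_sleep();                  /* may fault and sleep */
        left = __copy_from_user_inatomic(to, from, n);
        if (left)                       /* pad the uncopied tail with zeros */
                memset((char *)to + (n - left), 0, left);
        return left;                    /* bytes that could not be copied */
}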
diff --git a/mm/filemap.c b/mm/filemap.c
index 807a463fd5ed..1ed4be2a7654 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1892,7 +1892,7 @@ int remove_suid(struct dentry *dentry)
 EXPORT_SYMBOL(remove_suid);
 
 size_t
-__filemap_copy_from_user_iovec(char *vaddr,
+__filemap_copy_from_user_iovec_inatomic(char *vaddr,
                 const struct iovec *iov, size_t base, size_t bytes)
 {
         size_t copied = 0, left = 0;
@@ -1908,12 +1908,8 @@ __filemap_copy_from_user_iovec(char *vaddr,
                 vaddr += copy;
                 iov++;
 
-                if (unlikely(left)) {
-                        /* zero the rest of the target like __copy_from_user */
-                        if (bytes)
-                                memset(vaddr, 0, bytes);
+                if (unlikely(left))
                         break;
-                }
         }
         return copied - left;
 }
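
The hunk shows only the tail of the copy loop.  For orientation, here is the
whole function in its post-patch shape, reconstructed from the fragments
above; treat everything outside the quoted lines as an assumption rather than
verbatim source:

size_t
__filemap_copy_from_user_iovec_inatomic(char *vaddr,
                const struct iovec *iov, size_t base, size_t bytes)
{
        size_t copied = 0, left = 0;

        while (bytes) {
                char __user *buf = iov->iov_base + base;
                int copy = min(bytes, iov->iov_len - base);

                base = 0;
                left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
                iov++;

                if (unlikely(left))
                        break;          /* report the short copy, no memset */
        }
        return copied - left;
}

The key point is the return value: on a fault the function now reports the
short copy via copied - left and leaves the tail untouched.  Zero-filling
becomes the caller's job, after the final (non-atomic) attempt.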
diff --git a/mm/filemap.h b/mm/filemap.h
index 5683cde22055..536979fb4ba7 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -16,15 +16,23 @@
 #include <linux/uaccess.h>
 
 size_t
-__filemap_copy_from_user_iovec(char *vaddr,
+__filemap_copy_from_user_iovec_inatomic(char *vaddr,
                 const struct iovec *iov,
                 size_t base,
                 size_t bytes);
 
 /*
  * Copy as much as we can into the page and return the number of bytes which
  * were sucessfully copied.  If a fault is encountered then clear the page
  * out to (offset+bytes) and return the number of bytes which were copied.
+ *
+ * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
+ * to *NOT* zero any tail of the buffer that it failed to copy.  If it does,
+ * and if the following non-atomic copy succeeds, then there is a small window
+ * where the target page contains neither the data before the write, nor the
+ * data after the write (it contains zero).  A read at this time will see
+ * data that is inconsistent with any ordering of the read and the write.
+ * (This has been detected in practice.)
  */
 static inline size_t
 filemap_copy_from_user(struct page *page, unsigned long offset,
@@ -60,13 +68,15 @@ filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
         size_t copied;
 
         kaddr = kmap_atomic(page, KM_USER0);
-        copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
+        copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
                         base, bytes);
         kunmap_atomic(kaddr, KM_USER0);
         if (copied != bytes) {
                 kaddr = kmap(page);
-                copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
+                copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
                                 base, bytes);
+                if (bytes - copied)
+                        memset(kaddr + offset + copied, 0, bytes - copied);
                 kunmap(page);
         }
         return copied;
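
For contrast, the single-segment helper declared in the same header,
filemap_copy_from_user(), needs no explicit memset: its slow path can use
__copy_from_user(), which pads the uncopied tail with zeros itself.  A sketch
of that sibling from the same era (its body is not part of this diff, so the
exact lines are an assumption):

static inline size_t
filemap_copy_from_user(struct page *page, unsigned long offset,
                const char __user *buf, unsigned bytes)
{
        char *kaddr;
        int left;

        kaddr = kmap_atomic(page, KM_USER0);
        left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
        kunmap_atomic(kaddr, KM_USER0);

        if (left != 0) {
                /* Slow path: __copy_from_user() zero-pads the tail itself. */
                kaddr = kmap(page);
                left = __copy_from_user(kaddr + offset, buf, bytes);
                kunmap(page);
        }
        return bytes - left;
}

Only the iovec-walking variants need the explicit memset added by this patch,
because their shared helper stops zeroing on behalf of its callers.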