Diffstat (limited to 'fs/splice.c')
 fs/splice.c | 47 ++++++-----------------------------------
 1 file changed, 6 insertions(+), 41 deletions(-)
diff --git a/fs/splice.c b/fs/splice.c
index 399442179d89..1bbc6f4bb09c 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -371,7 +371,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 		 * for an in-flight io page
 		 */
 		if (flags & SPLICE_F_NONBLOCK) {
-			if (TestSetPageLocked(page)) {
+			if (!trylock_page(page)) {
 				error = -EAGAIN;
 				break;
 			}
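A note on the page-lock change: trylock_page() returns true only when it actually acquires the page lock, whereas the old TestSetPageLocked() returned the previous lock bit (nonzero when the page was already locked), hence the inverted test above. A minimal sketch of the non-blocking pattern, using an illustrative helper name that is not part of this commit and assuming <linux/pagemap.h>:

	/* Illustrative sketch only: non-blocking vs. blocking page lock. */
	static int splice_lock_page(struct page *page, unsigned int flags)
	{
		if (flags & SPLICE_F_NONBLOCK) {
			if (!trylock_page(page))	/* lock held elsewhere */
				return -EAGAIN;
			return 0;
		}
		lock_page(page);			/* may sleep until the lock is free */
		return 0;
	}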
@@ -772,7 +772,7 @@ generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
 	ssize_t ret;
 	int err;
 
-	err = remove_suid(out->f_path.dentry);
+	err = file_remove_suid(out);
 	if (unlikely(err))
 		return err;
 
@@ -830,7 +830,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
 	ssize_t ret;
 
 	inode_double_lock(inode, pipe->inode);
-	ret = remove_suid(out->f_path.dentry);
+	ret = file_remove_suid(out);
 	if (likely(!ret))
 		ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
 	inode_double_unlock(inode, pipe->inode);
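Both splice write paths now call file_remove_suid(), which takes the struct file directly instead of a dentry (it looks up out->f_path.dentry internally). A minimal sketch of the usual write-side prologue, with an illustrative helper name not taken from this commit, assuming <linux/fs.h>:

	/* Illustrative sketch only: strip setuid/setgid before writing. */
	static int splice_write_prologue(struct file *out)
	{
		int err;

		err = file_remove_suid(out);	/* clears S_ISUID/S_ISGID if set */
		if (unlikely(err))
			return err;

		file_update_time(out);		/* as generic write paths do */
		return 0;
	}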
@@ -1161,36 +1161,6 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 }
 
 /*
- * Do a copy-from-user while holding the mmap_semaphore for reading, in a
- * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
- * for writing) and page faulting on the user memory pointed to by src.
- * This assumes that we will very rarely hit the partial != 0 path, or this
- * will not be a win.
- */
-static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
-{
-	int partial;
-
-	if (!access_ok(VERIFY_READ, src, n))
-		return -EFAULT;
-
-	pagefault_disable();
-	partial = __copy_from_user_inatomic(dst, src, n);
-	pagefault_enable();
-
-	/*
-	 * Didn't copy everything, drop the mmap_sem and do a faulting copy
-	 */
-	if (unlikely(partial)) {
-		up_read(&current->mm->mmap_sem);
-		partial = copy_from_user(dst, src, n);
-		down_read(&current->mm->mmap_sem);
-	}
-
-	return partial;
-}
-
-/*
  * Map an iov into an array of pages and offset/length tupples. With the
  * partial_page structure, we can map several non-contiguous ranges into
  * our ones pages[] map instead of splitting that operation into pieces.
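The helper removed above existed only because get_iovec_page_array() used to hold mmap_sem around the user copy; with get_user_pages_fast() the caller no longer holds it, so the access_ok()/atomic-copy/faulting-fallback dance collapses into one ordinary copy_from_user(), which performs its own access_ok() check and is free to fault and sleep. A minimal sketch of that simpler fetch, with an illustrative name and assuming <linux/uaccess.h> and <linux/uio.h>:

	/* Illustrative sketch only: fetch one iovec descriptor from userspace. */
	static int fetch_user_iovec(struct iovec *entry,
				    const struct iovec __user *uiov)
	{
		/* copy_from_user() returns the number of bytes NOT copied */
		if (copy_from_user(entry, uiov, sizeof(*entry)))
			return -EFAULT;
		return 0;
	}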
@@ -1203,8 +1173,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 {
 	int buffers = 0, error = 0;
 
-	down_read(&current->mm->mmap_sem);
-
 	while (nr_vecs) {
 		unsigned long off, npages;
 		struct iovec entry;
@@ -1213,7 +1181,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		int i;
 
 		error = -EFAULT;
-		if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
+		if (copy_from_user(&entry, iov, sizeof(entry)))
 			break;
 
 		base = entry.iov_base;
@@ -1247,9 +1215,8 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		if (npages > PIPE_BUFFERS - buffers)
 			npages = PIPE_BUFFERS - buffers;
 
-		error = get_user_pages(current, current->mm,
-				       (unsigned long) base, npages, 0, 0,
-				       &pages[buffers], NULL);
+		error = get_user_pages_fast((unsigned long)base, npages,
+					0, &pages[buffers]);
 
 		if (unlikely(error <= 0))
 			break;
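For context, get_user_pages_fast() pins the pages backing a user address range without requiring the caller to hold mmap_sem (it takes the lock internally only when it has to fall back to the slow path) and returns the number of pages pinned or a negative errno. In the kernels this commit targets the third argument is a write flag (0 here, since the spliced user pages are only read); later kernels turned it into a flags mask. A minimal sketch with an illustrative wrapper name, assuming <linux/mm.h>:

	/* Illustrative sketch only: read-only pin of up to max_pages pages. */
	static int pin_user_range(void __user *base, size_t len,
				  struct page **pages, int max_pages)
	{
		unsigned long start = (unsigned long)base;
		unsigned long off = start & ~PAGE_MASK;
		unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (npages > max_pages)
			npages = max_pages;

		/* number of pages pinned (may be fewer than requested) or -errno */
		return get_user_pages_fast(start, npages, 0, pages);
	}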
@@ -1288,8 +1255,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		iov++;
 	}
 
-	up_read(&current->mm->mmap_sem);
-
 	if (buffers)
 		return buffers;
 
