author     Nick Piggin <npiggin@suse.de>                        2008-07-25 22:45:26 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>       2008-07-26 15:00:06 -0400
commit     bc40d73c950146725e9e768e856a416ec8949065 (patch)
tree       7d12351ddeb6a281ff7a9b5db543067c710d1859 /fs/splice.c
parent     f5dd33c494a427b1d1a3b574de5c9e511c888864 (diff)
splice: use get_user_pages_fast
Use get_user_pages_fast in splice.  This reverts some mmap_sem batching
there, however the biggest problem with mmap_sem tends to be hold times
blocking out other threads rather than cacheline bouncing.

Further: on architectures that implement get_user_pages_fast without
locks, mmap_sem can be avoided completely anyway.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Zach Brown <zach.brown@oracle.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Reviewed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
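For context, a minimal sketch of the two pinning patterns this patch trades
between. The helper names pin_pages_old()/pin_pages_new() are hypothetical;
only the get_user_pages()/get_user_pages_fast() calls and the mmap_sem
handling reflect what the diff below actually changes (2.6.26-era kernel
signatures):

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/rwsem.h>

/*
 * Hypothetical helpers, for illustration only: pin up to 'npages' pages of
 * user memory starting at 'base' for reading (write == 0).
 *
 * Old pattern: the caller takes mmap_sem for reading around
 * get_user_pages(), serialising against concurrent mmap()/munmap().
 */
static int pin_pages_old(unsigned long base, int npages, struct page **pages)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, base, npages,
			     0 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return ret;	/* number of pages pinned, or -errno */
}

/*
 * New pattern: get_user_pages_fast() takes (or, on architectures with a
 * lockless implementation, avoids) mmap_sem internally, so the caller
 * holds no locks at all.
 */
static int pin_pages_new(unsigned long base, int npages, struct page **pages)
{
	return get_user_pages_fast(base, npages, 0 /* write */, pages);
}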
Diffstat (limited to 'fs/splice.c')
-rw-r--r--  fs/splice.c  41
1 file changed, 3 insertions, 38 deletions
diff --git a/fs/splice.c b/fs/splice.c
index 399442179d89..47dc1a445d1f 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1161,36 +1161,6 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 }
 
 /*
- * Do a copy-from-user while holding the mmap_semaphore for reading, in a
- * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
- * for writing) and page faulting on the user memory pointed to by src.
- * This assumes that we will very rarely hit the partial != 0 path, or this
- * will not be a win.
- */
-static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
-{
-	int partial;
-
-	if (!access_ok(VERIFY_READ, src, n))
-		return -EFAULT;
-
-	pagefault_disable();
-	partial = __copy_from_user_inatomic(dst, src, n);
-	pagefault_enable();
-
-	/*
-	 * Didn't copy everything, drop the mmap_sem and do a faulting copy
-	 */
-	if (unlikely(partial)) {
-		up_read(&current->mm->mmap_sem);
-		partial = copy_from_user(dst, src, n);
-		down_read(&current->mm->mmap_sem);
-	}
-
-	return partial;
-}
-
-/*
  * Map an iov into an array of pages and offset/length tupples. With the
  * partial_page structure, we can map several non-contiguous ranges into
  * our ones pages[] map instead of splitting that operation into pieces.
@@ -1203,8 +1173,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 {
 	int buffers = 0, error = 0;
 
-	down_read(&current->mm->mmap_sem);
-
 	while (nr_vecs) {
 		unsigned long off, npages;
 		struct iovec entry;
@@ -1213,7 +1181,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		int i;
 
 		error = -EFAULT;
-		if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
+		if (copy_from_user(&entry, iov, sizeof(entry)))
 			break;
 
 		base = entry.iov_base;
@@ -1247,9 +1215,8 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		if (npages > PIPE_BUFFERS - buffers)
 			npages = PIPE_BUFFERS - buffers;
 
-		error = get_user_pages(current, current->mm,
-				(unsigned long) base, npages, 0, 0,
-				&pages[buffers], NULL);
+		error = get_user_pages_fast((unsigned long)base, npages,
+					0, &pages[buffers]);
 
 		if (unlikely(error <= 0))
 			break;
@@ -1288,8 +1255,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		iov++;
 	}
 
-	up_read(&current->mm->mmap_sem);
-
 	if (buffers)
 		return buffers;
 
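A usage note, not part of the patch: both get_user_pages() and
get_user_pages_fast() return the number of pages actually pinned (possibly
fewer than requested) or a negative errno, which is why the loop above only
bails out on error <= 0. Each page reference taken this way must later be
dropped with put_page() (page_cache_release() in this era). A hypothetical
cleanup helper as a sketch:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper, for illustration only: drop the references taken by
 * get_user_pages()/get_user_pages_fast() once the pages are no longer used.
 */
static void release_pinned_pages(struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		page_cache_release(pages[i]);	/* equivalent to put_page() */
}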