author    Al Viro <viro@zeniv.linux.org.uk>    2014-03-21 04:58:33 -0400
committer Al Viro <viro@zeniv.linux.org.uk>    2014-05-06 17:32:53 -0400
commit    91f79c43d1b54d7154b118860d81b39bad07dfff (patch)
tree      a5b142ba57fdabf835476b6dbca24288a78f0c53 /drivers
parent    f67da30c1d5fc9e341bc8121708874bfd7b31e45 (diff)
new helper: iov_iter_get_pages_alloc()
same as iov_iter_get_pages(), except that pages array is allocated
(kmalloc if possible, vmalloc if that fails) and left for caller to
free.  Lustre and NFS ->direct_IO() switched to it.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
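A minimal sketch of the new helper's calling convention, distilled from
the Lustre conversion below; example_dio_step() and the elided transfer
step are illustrative, not part of this patch:

	/*
	 * Illustrative only: the call pattern for
	 * iov_iter_get_pages_alloc(), modelled on the ll_direct_IO_26()
	 * loop below.  The actual I/O on the pinned pages is elided.
	 */
	static ssize_t example_dio_step(struct iov_iter *iter, size_t count,
					int do_dirty)
	{
		struct page **pages;
		size_t offs;	/* offset of the data within pages[0] */
		ssize_t bytes;
		int i, n;

		/* Pins up to "count" bytes of the iterator; allocates
		 * "pages" itself (kmalloc if possible, vmalloc if that
		 * fails).  Returns bytes pinned or a negative errno. */
		bytes = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
		if (bytes <= 0)
			return bytes;

		n = (bytes + offs + PAGE_SIZE - 1) / PAGE_SIZE;

		/* ... perform the transfer against pages[0..n-1] ... */

		for (i = 0; i < n; i++) {
			if (do_dirty)
				set_page_dirty_lock(pages[i]);
			page_cache_release(pages[i]);
		}
		kvfree(pages);	/* array came from kmalloc or vmalloc */

		iov_iter_advance(iter, bytes);
		return bytes;
	}

Unlike iov_iter_get_pages(), the caller never sizes or supplies the
array; since the helper may have used either kmalloc or vmalloc,
kvfree() is the matching release.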
Diffstat (limited to 'drivers')
 drivers/staging/lustre/lustre/llite/rw26.c | 92 ++++++++++---------------
 1 file changed, 37 insertions(+), 55 deletions(-)
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index f718585c9e08..6b5994577b6b 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -218,14 +218,11 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
 	int i;
 
 	for (i = 0; i < npages; i++) {
-		if (pages[i] == NULL)
-			break;
 		if (do_dirty)
 			set_page_dirty_lock(pages[i]);
 		page_cache_release(pages[i]);
 	}
-
-	OBD_FREE_LARGE(pages, npages * sizeof(*pages));
+	kvfree(pages);
 }
 
 ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
@@ -370,10 +367,9 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	struct ccc_object *obj = cl_inode2ccc(inode);
-	long count = iov_iter_count(iter);
-	long tot_bytes = 0, result = 0;
+	ssize_t count = iov_iter_count(iter);
+	ssize_t tot_bytes = 0, result = 0;
 	struct ll_inode_info *lli = ll_i2info(inode);
-	unsigned long seg = 0;
 	long size = MAX_DIO_SIZE;
 	int refcheck;
 
@@ -407,63 +403,49 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
 	mutex_lock(&inode->i_mutex);
 
 	LASSERT(obj->cob_transient_pages == 0);
-	for (seg = 0; seg < iter->nr_segs; seg++) {
-		long iov_left = iter->iov[seg].iov_len;
-		unsigned long user_addr = (unsigned long)iter->iov[seg].iov_base;
+	while (iov_iter_count(iter)) {
+		struct page **pages;
+		size_t offs;
 
+		count = min_t(size_t, iov_iter_count(iter), size);
 		if (rw == READ) {
 			if (file_offset >= i_size_read(inode))
 				break;
-			if (file_offset + iov_left > i_size_read(inode))
-				iov_left = i_size_read(inode) - file_offset;
+			if (file_offset + count > i_size_read(inode))
+				count = i_size_read(inode) - file_offset;
 		}
 
-		while (iov_left > 0) {
-			struct page **pages;
-			int page_count, max_pages = 0;
-			long bytes;
-
-			bytes = min(size, iov_left);
-			page_count = ll_get_user_pages(rw, user_addr, bytes,
-						       &pages, &max_pages);
-			if (likely(page_count > 0)) {
-				if (unlikely(page_count < max_pages))
-					bytes = page_count << PAGE_CACHE_SHIFT;
-				result = ll_direct_IO_26_seg(env, io, rw, inode,
-							     file->f_mapping,
-							     bytes, file_offset,
-							     pages, page_count);
-				ll_free_user_pages(pages, max_pages, rw==READ);
-			} else if (page_count == 0) {
-				GOTO(out, result = -EFAULT);
-			} else {
-				result = page_count;
-			}
-			if (unlikely(result <= 0)) {
-				/* If we can't allocate a large enough buffer
-				 * for the request, shrink it to a smaller
-				 * PAGE_SIZE multiple and try again.
-				 * We should always be able to kmalloc for a
-				 * page worth of page pointers = 4MB on i386. */
-				if (result == -ENOMEM &&
-				    size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
-					   PAGE_CACHE_SIZE) {
-					size = ((((size / 2) - 1) |
-						 ~CFS_PAGE_MASK) + 1) &
-						CFS_PAGE_MASK;
-					CDEBUG(D_VFSTRACE,"DIO size now %lu\n",
-					       size);
-					continue;
-				}
-
-				GOTO(out, result);
+		result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
+		if (likely(result > 0)) {
+			int n = (result + offs + PAGE_SIZE - 1) / PAGE_SIZE;
+			result = ll_direct_IO_26_seg(env, io, rw, inode,
+						     file->f_mapping,
+						     result, file_offset,
+						     pages, n);
+			ll_free_user_pages(pages, n, rw==READ);
+		}
+		if (unlikely(result <= 0)) {
+			/* If we can't allocate a large enough buffer
+			 * for the request, shrink it to a smaller
+			 * PAGE_SIZE multiple and try again.
+			 * We should always be able to kmalloc for a
+			 * page worth of page pointers = 4MB on i386. */
+			if (result == -ENOMEM &&
+			    size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
+				   PAGE_CACHE_SIZE) {
+				size = ((((size / 2) - 1) |
+					 ~CFS_PAGE_MASK) + 1) &
+					CFS_PAGE_MASK;
+				CDEBUG(D_VFSTRACE,"DIO size now %lu\n",
+				       size);
+				continue;
 			}
 
-			tot_bytes += result;
-			file_offset += result;
-			iov_left -= result;
-			user_addr += result;
+			GOTO(out, result);
 		}
+		iov_iter_advance(iter, result);
+		tot_bytes += result;
+		file_offset += result;
 	}
 out:
 	LASSERT(obj->cob_transient_pages == 0);